repo_name stringclasses 6 values | pr_number int64 512 78.9k | pr_title stringlengths 3 144 | pr_description stringlengths 0 30.3k | author stringlengths 2 21 | date_created timestamp[ns, tz=UTC] | date_merged timestamp[ns, tz=UTC] | previous_commit stringlengths 40 40 | pr_commit stringlengths 40 40 | query stringlengths 17 30.4k | filepath stringlengths 9 210 | before_content stringlengths 0 112M | after_content stringlengths 0 112M | label int64 -1 1 |
---|---|---|---|---|---|---|---|---|---|---|---|---|---|
dotnet/runtime | 66,248 | Fix issues related to JsonSerializerOptions mutation and caching. | Second attempt at merging https://github.com/dotnet/runtime/pull/65863. Original PR introduced test failures in netfx and was reverted in https://github.com/dotnet/runtime/pull/66235.
cc @jkotas | eiriktsarpalis | 2022-03-05T19:59:48Z | 2022-03-07T19:44:14Z | 44ec3c9474b3a93eb4f71791a43d8bb5c0b08a58 | 8b4eaf94140f488743d0c78caa3afec3b9c5d789 | Fix issues related to JsonSerializerOptions mutation and caching.. Second attempt at merging https://github.com/dotnet/runtime/pull/65863. Original PR introduced test failures in netfx and was reverted in https://github.com/dotnet/runtime/pull/66235.
cc @jkotas | ./src/tests/JIT/HardwareIntrinsics/Arm/AdvSimd/ConvertToInt32RoundToPositiveInfinity.Vector64.Single.cs | // Licensed to the .NET Foundation under one or more agreements.
// The .NET Foundation licenses this file to you under the MIT license.
/******************************************************************************
* This file is auto-generated from a template file by the GenerateTests.csx *
* script in tests\src\JIT\HardwareIntrinsics.Arm\Shared. In order to make *
* changes, please update the corresponding template and run according to the *
* directions listed in the file. *
******************************************************************************/
using System;
using System.Runtime.CompilerServices;
using System.Runtime.InteropServices;
using System.Runtime.Intrinsics;
using System.Runtime.Intrinsics.Arm;
namespace JIT.HardwareIntrinsics.Arm
{
public static partial class Program
{
private static void ConvertToInt32RoundToPositiveInfinity_Vector64_Single()
{
var test = new SimpleUnaryOpTest__ConvertToInt32RoundToPositiveInfinity_Vector64_Single();
if (test.IsSupported)
{
// Validates basic functionality works, using Unsafe.Read
test.RunBasicScenario_UnsafeRead();
if (AdvSimd.IsSupported)
{
// Validates basic functionality works, using Load
test.RunBasicScenario_Load();
}
// Validates calling via reflection works, using Unsafe.Read
test.RunReflectionScenario_UnsafeRead();
if (AdvSimd.IsSupported)
{
// Validates calling via reflection works, using Load
test.RunReflectionScenario_Load();
}
// Validates passing a static member works
test.RunClsVarScenario();
if (AdvSimd.IsSupported)
{
// Validates passing a static member works, using pinning and Load
test.RunClsVarScenario_Load();
}
// Validates passing a local works, using Unsafe.Read
test.RunLclVarScenario_UnsafeRead();
if (AdvSimd.IsSupported)
{
// Validates passing a local works, using Load
test.RunLclVarScenario_Load();
}
// Validates passing the field of a local class works
test.RunClassLclFldScenario();
if (AdvSimd.IsSupported)
{
// Validates passing the field of a local class works, using pinning and Load
test.RunClassLclFldScenario_Load();
}
// Validates passing an instance member of a class works
test.RunClassFldScenario();
if (AdvSimd.IsSupported)
{
// Validates passing an instance member of a class works, using pinning and Load
test.RunClassFldScenario_Load();
}
// Validates passing the field of a local struct works
test.RunStructLclFldScenario();
if (AdvSimd.IsSupported)
{
// Validates passing the field of a local struct works, using pinning and Load
test.RunStructLclFldScenario_Load();
}
// Validates passing an instance member of a struct works
test.RunStructFldScenario();
if (AdvSimd.IsSupported)
{
// Validates passing an instance member of a struct works, using pinning and Load
test.RunStructFldScenario_Load();
}
}
else
{
// Validates we throw on unsupported hardware
test.RunUnsupportedScenario();
}
if (!test.Succeeded)
{
throw new Exception("One or more scenarios did not complete as expected.");
}
}
}
public sealed unsafe class SimpleUnaryOpTest__ConvertToInt32RoundToPositiveInfinity_Vector64_Single
{
private struct DataTable
{
private byte[] inArray1;
private byte[] outArray;
private GCHandle inHandle1;
private GCHandle outHandle;
private ulong alignment;
public DataTable(Single[] inArray1, Int32[] outArray, int alignment)
{
int sizeOfinArray1 = inArray1.Length * Unsafe.SizeOf<Single>();
int sizeOfoutArray = outArray.Length * Unsafe.SizeOf<Int32>();
if ((alignment != 16 && alignment != 8) || (alignment * 2) < sizeOfinArray1 || (alignment * 2) < sizeOfoutArray)
{
throw new ArgumentException("Invalid value of alignment");
}
this.inArray1 = new byte[alignment * 2];
this.outArray = new byte[alignment * 2];
this.inHandle1 = GCHandle.Alloc(this.inArray1, GCHandleType.Pinned);
this.outHandle = GCHandle.Alloc(this.outArray, GCHandleType.Pinned);
this.alignment = (ulong)alignment;
Unsafe.CopyBlockUnaligned(ref Unsafe.AsRef<byte>(inArray1Ptr), ref Unsafe.As<Single, byte>(ref inArray1[0]), (uint)sizeOfinArray1);
}
public void* inArray1Ptr => Align((byte*)(inHandle1.AddrOfPinnedObject().ToPointer()), alignment);
public void* outArrayPtr => Align((byte*)(outHandle.AddrOfPinnedObject().ToPointer()), alignment);
public void Dispose()
{
inHandle1.Free();
outHandle.Free();
}
private static unsafe void* Align(byte* buffer, ulong expectedAlignment)
{
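// Rounds the buffer pointer up to the next multiple of expectedAlignment; assumes expectedAlignment is a power of two (8 or 16 here).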
return (void*)(((ulong)buffer + expectedAlignment - 1) & ~(expectedAlignment - 1));
}
}
private struct TestStruct
{
public Vector64<Single> _fld1;
public static TestStruct Create()
{
var testStruct = new TestStruct();
for (var i = 0; i < Op1ElementCount; i++) { _data1[i] = TestLibrary.Generator.GetSingle(); }
Unsafe.CopyBlockUnaligned(ref Unsafe.As<Vector64<Single>, byte>(ref testStruct._fld1), ref Unsafe.As<Single, byte>(ref _data1[0]), (uint)Unsafe.SizeOf<Vector64<Single>>());
return testStruct;
}
public void RunStructFldScenario(SimpleUnaryOpTest__ConvertToInt32RoundToPositiveInfinity_Vector64_Single testClass)
{
var result = AdvSimd.ConvertToInt32RoundToPositiveInfinity(_fld1);
Unsafe.Write(testClass._dataTable.outArrayPtr, result);
testClass.ValidateResult(_fld1, testClass._dataTable.outArrayPtr);
}
public void RunStructFldScenario_Load(SimpleUnaryOpTest__ConvertToInt32RoundToPositiveInfinity_Vector64_Single testClass)
{
fixed (Vector64<Single>* pFld1 = &_fld1)
{
var result = AdvSimd.ConvertToInt32RoundToPositiveInfinity(
AdvSimd.LoadVector64((Single*)(pFld1))
);
Unsafe.Write(testClass._dataTable.outArrayPtr, result);
testClass.ValidateResult(_fld1, testClass._dataTable.outArrayPtr);
}
}
}
private static readonly int LargestVectorSize = 8;
private static readonly int Op1ElementCount = Unsafe.SizeOf<Vector64<Single>>() / sizeof(Single);
private static readonly int RetElementCount = Unsafe.SizeOf<Vector64<Int32>>() / sizeof(Int32);
private static Single[] _data1 = new Single[Op1ElementCount];
private static Vector64<Single> _clsVar1;
private Vector64<Single> _fld1;
private DataTable _dataTable;
static SimpleUnaryOpTest__ConvertToInt32RoundToPositiveInfinity_Vector64_Single()
{
for (var i = 0; i < Op1ElementCount; i++) { _data1[i] = TestLibrary.Generator.GetSingle(); }
Unsafe.CopyBlockUnaligned(ref Unsafe.As<Vector64<Single>, byte>(ref _clsVar1), ref Unsafe.As<Single, byte>(ref _data1[0]), (uint)Unsafe.SizeOf<Vector64<Single>>());
}
public SimpleUnaryOpTest__ConvertToInt32RoundToPositiveInfinity_Vector64_Single()
{
Succeeded = true;
for (var i = 0; i < Op1ElementCount; i++) { _data1[i] = TestLibrary.Generator.GetSingle(); }
Unsafe.CopyBlockUnaligned(ref Unsafe.As<Vector64<Single>, byte>(ref _fld1), ref Unsafe.As<Single, byte>(ref _data1[0]), (uint)Unsafe.SizeOf<Vector64<Single>>());
for (var i = 0; i < Op1ElementCount; i++) { _data1[i] = TestLibrary.Generator.GetSingle(); }
_dataTable = new DataTable(_data1, new Int32[RetElementCount], LargestVectorSize);
}
public bool IsSupported => AdvSimd.IsSupported;
public bool Succeeded { get; set; }
public void RunBasicScenario_UnsafeRead()
{
TestLibrary.TestFramework.BeginScenario(nameof(RunBasicScenario_UnsafeRead));
var result = AdvSimd.ConvertToInt32RoundToPositiveInfinity(
Unsafe.Read<Vector64<Single>>(_dataTable.inArray1Ptr)
);
Unsafe.Write(_dataTable.outArrayPtr, result);
ValidateResult(_dataTable.inArray1Ptr, _dataTable.outArrayPtr);
}
public void RunBasicScenario_Load()
{
TestLibrary.TestFramework.BeginScenario(nameof(RunBasicScenario_Load));
var result = AdvSimd.ConvertToInt32RoundToPositiveInfinity(
AdvSimd.LoadVector64((Single*)(_dataTable.inArray1Ptr))
);
Unsafe.Write(_dataTable.outArrayPtr, result);
ValidateResult(_dataTable.inArray1Ptr, _dataTable.outArrayPtr);
}
public void RunReflectionScenario_UnsafeRead()
{
TestLibrary.TestFramework.BeginScenario(nameof(RunReflectionScenario_UnsafeRead));
var result = typeof(AdvSimd).GetMethod(nameof(AdvSimd.ConvertToInt32RoundToPositiveInfinity), new Type[] { typeof(Vector64<Single>) })
.Invoke(null, new object[] {
Unsafe.Read<Vector64<Single>>(_dataTable.inArray1Ptr)
});
Unsafe.Write(_dataTable.outArrayPtr, (Vector64<Int32>)(result));
ValidateResult(_dataTable.inArray1Ptr, _dataTable.outArrayPtr);
}
public void RunReflectionScenario_Load()
{
TestLibrary.TestFramework.BeginScenario(nameof(RunReflectionScenario_Load));
var result = typeof(AdvSimd).GetMethod(nameof(AdvSimd.ConvertToInt32RoundToPositiveInfinity), new Type[] { typeof(Vector64<Single>) })
.Invoke(null, new object[] {
AdvSimd.LoadVector64((Single*)(_dataTable.inArray1Ptr))
});
Unsafe.Write(_dataTable.outArrayPtr, (Vector64<Int32>)(result));
ValidateResult(_dataTable.inArray1Ptr, _dataTable.outArrayPtr);
}
public void RunClsVarScenario()
{
TestLibrary.TestFramework.BeginScenario(nameof(RunClsVarScenario));
var result = AdvSimd.ConvertToInt32RoundToPositiveInfinity(
_clsVar1
);
Unsafe.Write(_dataTable.outArrayPtr, result);
ValidateResult(_clsVar1, _dataTable.outArrayPtr);
}
public void RunClsVarScenario_Load()
{
TestLibrary.TestFramework.BeginScenario(nameof(RunClsVarScenario_Load));
fixed (Vector64<Single>* pClsVar1 = &_clsVar1)
{
var result = AdvSimd.ConvertToInt32RoundToPositiveInfinity(
AdvSimd.LoadVector64((Single*)(pClsVar1))
);
Unsafe.Write(_dataTable.outArrayPtr, result);
ValidateResult(_clsVar1, _dataTable.outArrayPtr);
}
}
public void RunLclVarScenario_UnsafeRead()
{
TestLibrary.TestFramework.BeginScenario(nameof(RunLclVarScenario_UnsafeRead));
var op1 = Unsafe.Read<Vector64<Single>>(_dataTable.inArray1Ptr);
var result = AdvSimd.ConvertToInt32RoundToPositiveInfinity(op1);
Unsafe.Write(_dataTable.outArrayPtr, result);
ValidateResult(op1, _dataTable.outArrayPtr);
}
public void RunLclVarScenario_Load()
{
TestLibrary.TestFramework.BeginScenario(nameof(RunLclVarScenario_Load));
var op1 = AdvSimd.LoadVector64((Single*)(_dataTable.inArray1Ptr));
var result = AdvSimd.ConvertToInt32RoundToPositiveInfinity(op1);
Unsafe.Write(_dataTable.outArrayPtr, result);
ValidateResult(op1, _dataTable.outArrayPtr);
}
public void RunClassLclFldScenario()
{
TestLibrary.TestFramework.BeginScenario(nameof(RunClassLclFldScenario));
var test = new SimpleUnaryOpTest__ConvertToInt32RoundToPositiveInfinity_Vector64_Single();
var result = AdvSimd.ConvertToInt32RoundToPositiveInfinity(test._fld1);
Unsafe.Write(_dataTable.outArrayPtr, result);
ValidateResult(test._fld1, _dataTable.outArrayPtr);
}
public void RunClassLclFldScenario_Load()
{
TestLibrary.TestFramework.BeginScenario(nameof(RunClassLclFldScenario_Load));
var test = new SimpleUnaryOpTest__ConvertToInt32RoundToPositiveInfinity_Vector64_Single();
fixed (Vector64<Single>* pFld1 = &test._fld1)
{
var result = AdvSimd.ConvertToInt32RoundToPositiveInfinity(
AdvSimd.LoadVector64((Single*)(pFld1))
);
Unsafe.Write(_dataTable.outArrayPtr, result);
ValidateResult(test._fld1, _dataTable.outArrayPtr);
}
}
public void RunClassFldScenario()
{
TestLibrary.TestFramework.BeginScenario(nameof(RunClassFldScenario));
var result = AdvSimd.ConvertToInt32RoundToPositiveInfinity(_fld1);
Unsafe.Write(_dataTable.outArrayPtr, result);
ValidateResult(_fld1, _dataTable.outArrayPtr);
}
public void RunClassFldScenario_Load()
{
TestLibrary.TestFramework.BeginScenario(nameof(RunClassFldScenario_Load));
fixed (Vector64<Single>* pFld1 = &_fld1)
{
var result = AdvSimd.ConvertToInt32RoundToPositiveInfinity(
AdvSimd.LoadVector64((Single*)(pFld1))
);
Unsafe.Write(_dataTable.outArrayPtr, result);
ValidateResult(_fld1, _dataTable.outArrayPtr);
}
}
public void RunStructLclFldScenario()
{
TestLibrary.TestFramework.BeginScenario(nameof(RunStructLclFldScenario));
var test = TestStruct.Create();
var result = AdvSimd.ConvertToInt32RoundToPositiveInfinity(test._fld1);
Unsafe.Write(_dataTable.outArrayPtr, result);
ValidateResult(test._fld1, _dataTable.outArrayPtr);
}
public void RunStructLclFldScenario_Load()
{
TestLibrary.TestFramework.BeginScenario(nameof(RunStructLclFldScenario_Load));
var test = TestStruct.Create();
var result = AdvSimd.ConvertToInt32RoundToPositiveInfinity(
AdvSimd.LoadVector64((Single*)(&test._fld1))
);
Unsafe.Write(_dataTable.outArrayPtr, result);
ValidateResult(test._fld1, _dataTable.outArrayPtr);
}
public void RunStructFldScenario()
{
TestLibrary.TestFramework.BeginScenario(nameof(RunStructFldScenario));
var test = TestStruct.Create();
test.RunStructFldScenario(this);
}
public void RunStructFldScenario_Load()
{
TestLibrary.TestFramework.BeginScenario(nameof(RunStructFldScenario_Load));
var test = TestStruct.Create();
test.RunStructFldScenario_Load(this);
}
public void RunUnsupportedScenario()
{
TestLibrary.TestFramework.BeginScenario(nameof(RunUnsupportedScenario));
bool succeeded = false;
try
{
RunBasicScenario_UnsafeRead();
}
catch (PlatformNotSupportedException)
{
succeeded = true;
}
if (!succeeded)
{
Succeeded = false;
}
}
private void ValidateResult(Vector64<Single> op1, void* result, [CallerMemberName] string method = "")
{
Single[] inArray1 = new Single[Op1ElementCount];
Int32[] outArray = new Int32[RetElementCount];
Unsafe.WriteUnaligned(ref Unsafe.As<Single, byte>(ref inArray1[0]), op1);
Unsafe.CopyBlockUnaligned(ref Unsafe.As<Int32, byte>(ref outArray[0]), ref Unsafe.AsRef<byte>(result), (uint)Unsafe.SizeOf<Vector64<Int32>>());
ValidateResult(inArray1, outArray, method);
}
private void ValidateResult(void* op1, void* result, [CallerMemberName] string method = "")
{
Single[] inArray1 = new Single[Op1ElementCount];
Int32[] outArray = new Int32[RetElementCount];
Unsafe.CopyBlockUnaligned(ref Unsafe.As<Single, byte>(ref inArray1[0]), ref Unsafe.AsRef<byte>(op1), (uint)Unsafe.SizeOf<Vector64<Single>>());
Unsafe.CopyBlockUnaligned(ref Unsafe.As<Int32, byte>(ref outArray[0]), ref Unsafe.AsRef<byte>(result), (uint)Unsafe.SizeOf<Vector64<Int32>>());
ValidateResult(inArray1, outArray, method);
}
private void ValidateResult(Single[] firstOp, Int32[] result, [CallerMemberName] string method = "")
{
bool succeeded = true;
for (var i = 0; i < RetElementCount; i++)
{
if (Helpers.ConvertToInt32RoundToPositiveInfinity(firstOp[i]) != result[i])
{
succeeded = false;
break;
}
}
if (!succeeded)
{
TestLibrary.TestFramework.LogInformation($"{nameof(AdvSimd)}.{nameof(AdvSimd.ConvertToInt32RoundToPositiveInfinity)}<Int32>(Vector64<Single>): {method} failed:");
TestLibrary.TestFramework.LogInformation($" firstOp: ({string.Join(", ", firstOp)})");
TestLibrary.TestFramework.LogInformation($" result: ({string.Join(", ", result)})");
TestLibrary.TestFramework.LogInformation(string.Empty);
Succeeded = false;
}
}
}
}
| // Licensed to the .NET Foundation under one or more agreements.
// The .NET Foundation licenses this file to you under the MIT license.
/******************************************************************************
* This file is auto-generated from a template file by the GenerateTests.csx *
* script in tests\src\JIT\HardwareIntrinsics.Arm\Shared. In order to make *
* changes, please update the corresponding template and run according to the *
* directions listed in the file. *
******************************************************************************/
using System;
using System.Runtime.CompilerServices;
using System.Runtime.InteropServices;
using System.Runtime.Intrinsics;
using System.Runtime.Intrinsics.Arm;
namespace JIT.HardwareIntrinsics.Arm
{
public static partial class Program
{
private static void ConvertToInt32RoundToPositiveInfinity_Vector64_Single()
{
var test = new SimpleUnaryOpTest__ConvertToInt32RoundToPositiveInfinity_Vector64_Single();
if (test.IsSupported)
{
// Validates basic functionality works, using Unsafe.Read
test.RunBasicScenario_UnsafeRead();
if (AdvSimd.IsSupported)
{
// Validates basic functionality works, using Load
test.RunBasicScenario_Load();
}
// Validates calling via reflection works, using Unsafe.Read
test.RunReflectionScenario_UnsafeRead();
if (AdvSimd.IsSupported)
{
// Validates calling via reflection works, using Load
test.RunReflectionScenario_Load();
}
// Validates passing a static member works
test.RunClsVarScenario();
if (AdvSimd.IsSupported)
{
// Validates passing a static member works, using pinning and Load
test.RunClsVarScenario_Load();
}
// Validates passing a local works, using Unsafe.Read
test.RunLclVarScenario_UnsafeRead();
if (AdvSimd.IsSupported)
{
// Validates passing a local works, using Load
test.RunLclVarScenario_Load();
}
// Validates passing the field of a local class works
test.RunClassLclFldScenario();
if (AdvSimd.IsSupported)
{
// Validates passing the field of a local class works, using pinning and Load
test.RunClassLclFldScenario_Load();
}
// Validates passing an instance member of a class works
test.RunClassFldScenario();
if (AdvSimd.IsSupported)
{
// Validates passing an instance member of a class works, using pinning and Load
test.RunClassFldScenario_Load();
}
// Validates passing the field of a local struct works
test.RunStructLclFldScenario();
if (AdvSimd.IsSupported)
{
// Validates passing the field of a local struct works, using pinning and Load
test.RunStructLclFldScenario_Load();
}
// Validates passing an instance member of a struct works
test.RunStructFldScenario();
if (AdvSimd.IsSupported)
{
// Validates passing an instance member of a struct works, using pinning and Load
test.RunStructFldScenario_Load();
}
}
else
{
// Validates we throw on unsupported hardware
test.RunUnsupportedScenario();
}
if (!test.Succeeded)
{
throw new Exception("One or more scenarios did not complete as expected.");
}
}
}
public sealed unsafe class SimpleUnaryOpTest__ConvertToInt32RoundToPositiveInfinity_Vector64_Single
{
private struct DataTable
{
private byte[] inArray1;
private byte[] outArray;
private GCHandle inHandle1;
private GCHandle outHandle;
private ulong alignment;
public DataTable(Single[] inArray1, Int32[] outArray, int alignment)
{
int sizeOfinArray1 = inArray1.Length * Unsafe.SizeOf<Single>();
int sizeOfoutArray = outArray.Length * Unsafe.SizeOf<Int32>();
if ((alignment != 16 && alignment != 8) || (alignment * 2) < sizeOfinArray1 || (alignment * 2) < sizeOfoutArray)
{
throw new ArgumentException("Invalid value of alignment");
}
this.inArray1 = new byte[alignment * 2];
this.outArray = new byte[alignment * 2];
this.inHandle1 = GCHandle.Alloc(this.inArray1, GCHandleType.Pinned);
this.outHandle = GCHandle.Alloc(this.outArray, GCHandleType.Pinned);
this.alignment = (ulong)alignment;
Unsafe.CopyBlockUnaligned(ref Unsafe.AsRef<byte>(inArray1Ptr), ref Unsafe.As<Single, byte>(ref inArray1[0]), (uint)sizeOfinArray1);
}
public void* inArray1Ptr => Align((byte*)(inHandle1.AddrOfPinnedObject().ToPointer()), alignment);
public void* outArrayPtr => Align((byte*)(outHandle.AddrOfPinnedObject().ToPointer()), alignment);
public void Dispose()
{
inHandle1.Free();
outHandle.Free();
}
private static unsafe void* Align(byte* buffer, ulong expectedAlignment)
{
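// Rounds the buffer pointer up to the next multiple of expectedAlignment; assumes expectedAlignment is a power of two (8 or 16 here).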
return (void*)(((ulong)buffer + expectedAlignment - 1) & ~(expectedAlignment - 1));
}
}
private struct TestStruct
{
public Vector64<Single> _fld1;
public static TestStruct Create()
{
var testStruct = new TestStruct();
for (var i = 0; i < Op1ElementCount; i++) { _data1[i] = TestLibrary.Generator.GetSingle(); }
Unsafe.CopyBlockUnaligned(ref Unsafe.As<Vector64<Single>, byte>(ref testStruct._fld1), ref Unsafe.As<Single, byte>(ref _data1[0]), (uint)Unsafe.SizeOf<Vector64<Single>>());
return testStruct;
}
public void RunStructFldScenario(SimpleUnaryOpTest__ConvertToInt32RoundToPositiveInfinity_Vector64_Single testClass)
{
var result = AdvSimd.ConvertToInt32RoundToPositiveInfinity(_fld1);
Unsafe.Write(testClass._dataTable.outArrayPtr, result);
testClass.ValidateResult(_fld1, testClass._dataTable.outArrayPtr);
}
public void RunStructFldScenario_Load(SimpleUnaryOpTest__ConvertToInt32RoundToPositiveInfinity_Vector64_Single testClass)
{
fixed (Vector64<Single>* pFld1 = &_fld1)
{
var result = AdvSimd.ConvertToInt32RoundToPositiveInfinity(
AdvSimd.LoadVector64((Single*)(pFld1))
);
Unsafe.Write(testClass._dataTable.outArrayPtr, result);
testClass.ValidateResult(_fld1, testClass._dataTable.outArrayPtr);
}
}
}
private static readonly int LargestVectorSize = 8;
private static readonly int Op1ElementCount = Unsafe.SizeOf<Vector64<Single>>() / sizeof(Single);
private static readonly int RetElementCount = Unsafe.SizeOf<Vector64<Int32>>() / sizeof(Int32);
private static Single[] _data1 = new Single[Op1ElementCount];
private static Vector64<Single> _clsVar1;
private Vector64<Single> _fld1;
private DataTable _dataTable;
static SimpleUnaryOpTest__ConvertToInt32RoundToPositiveInfinity_Vector64_Single()
{
for (var i = 0; i < Op1ElementCount; i++) { _data1[i] = TestLibrary.Generator.GetSingle(); }
Unsafe.CopyBlockUnaligned(ref Unsafe.As<Vector64<Single>, byte>(ref _clsVar1), ref Unsafe.As<Single, byte>(ref _data1[0]), (uint)Unsafe.SizeOf<Vector64<Single>>());
}
public SimpleUnaryOpTest__ConvertToInt32RoundToPositiveInfinity_Vector64_Single()
{
Succeeded = true;
for (var i = 0; i < Op1ElementCount; i++) { _data1[i] = TestLibrary.Generator.GetSingle(); }
Unsafe.CopyBlockUnaligned(ref Unsafe.As<Vector64<Single>, byte>(ref _fld1), ref Unsafe.As<Single, byte>(ref _data1[0]), (uint)Unsafe.SizeOf<Vector64<Single>>());
for (var i = 0; i < Op1ElementCount; i++) { _data1[i] = TestLibrary.Generator.GetSingle(); }
_dataTable = new DataTable(_data1, new Int32[RetElementCount], LargestVectorSize);
}
public bool IsSupported => AdvSimd.IsSupported;
public bool Succeeded { get; set; }
public void RunBasicScenario_UnsafeRead()
{
TestLibrary.TestFramework.BeginScenario(nameof(RunBasicScenario_UnsafeRead));
var result = AdvSimd.ConvertToInt32RoundToPositiveInfinity(
Unsafe.Read<Vector64<Single>>(_dataTable.inArray1Ptr)
);
Unsafe.Write(_dataTable.outArrayPtr, result);
ValidateResult(_dataTable.inArray1Ptr, _dataTable.outArrayPtr);
}
public void RunBasicScenario_Load()
{
TestLibrary.TestFramework.BeginScenario(nameof(RunBasicScenario_Load));
var result = AdvSimd.ConvertToInt32RoundToPositiveInfinity(
AdvSimd.LoadVector64((Single*)(_dataTable.inArray1Ptr))
);
Unsafe.Write(_dataTable.outArrayPtr, result);
ValidateResult(_dataTable.inArray1Ptr, _dataTable.outArrayPtr);
}
public void RunReflectionScenario_UnsafeRead()
{
TestLibrary.TestFramework.BeginScenario(nameof(RunReflectionScenario_UnsafeRead));
var result = typeof(AdvSimd).GetMethod(nameof(AdvSimd.ConvertToInt32RoundToPositiveInfinity), new Type[] { typeof(Vector64<Single>) })
.Invoke(null, new object[] {
Unsafe.Read<Vector64<Single>>(_dataTable.inArray1Ptr)
});
Unsafe.Write(_dataTable.outArrayPtr, (Vector64<Int32>)(result));
ValidateResult(_dataTable.inArray1Ptr, _dataTable.outArrayPtr);
}
public void RunReflectionScenario_Load()
{
TestLibrary.TestFramework.BeginScenario(nameof(RunReflectionScenario_Load));
var result = typeof(AdvSimd).GetMethod(nameof(AdvSimd.ConvertToInt32RoundToPositiveInfinity), new Type[] { typeof(Vector64<Single>) })
.Invoke(null, new object[] {
AdvSimd.LoadVector64((Single*)(_dataTable.inArray1Ptr))
});
Unsafe.Write(_dataTable.outArrayPtr, (Vector64<Int32>)(result));
ValidateResult(_dataTable.inArray1Ptr, _dataTable.outArrayPtr);
}
public void RunClsVarScenario()
{
TestLibrary.TestFramework.BeginScenario(nameof(RunClsVarScenario));
var result = AdvSimd.ConvertToInt32RoundToPositiveInfinity(
_clsVar1
);
Unsafe.Write(_dataTable.outArrayPtr, result);
ValidateResult(_clsVar1, _dataTable.outArrayPtr);
}
public void RunClsVarScenario_Load()
{
TestLibrary.TestFramework.BeginScenario(nameof(RunClsVarScenario_Load));
fixed (Vector64<Single>* pClsVar1 = &_clsVar1)
{
var result = AdvSimd.ConvertToInt32RoundToPositiveInfinity(
AdvSimd.LoadVector64((Single*)(pClsVar1))
);
Unsafe.Write(_dataTable.outArrayPtr, result);
ValidateResult(_clsVar1, _dataTable.outArrayPtr);
}
}
public void RunLclVarScenario_UnsafeRead()
{
TestLibrary.TestFramework.BeginScenario(nameof(RunLclVarScenario_UnsafeRead));
var op1 = Unsafe.Read<Vector64<Single>>(_dataTable.inArray1Ptr);
var result = AdvSimd.ConvertToInt32RoundToPositiveInfinity(op1);
Unsafe.Write(_dataTable.outArrayPtr, result);
ValidateResult(op1, _dataTable.outArrayPtr);
}
public void RunLclVarScenario_Load()
{
TestLibrary.TestFramework.BeginScenario(nameof(RunLclVarScenario_Load));
var op1 = AdvSimd.LoadVector64((Single*)(_dataTable.inArray1Ptr));
var result = AdvSimd.ConvertToInt32RoundToPositiveInfinity(op1);
Unsafe.Write(_dataTable.outArrayPtr, result);
ValidateResult(op1, _dataTable.outArrayPtr);
}
public void RunClassLclFldScenario()
{
TestLibrary.TestFramework.BeginScenario(nameof(RunClassLclFldScenario));
var test = new SimpleUnaryOpTest__ConvertToInt32RoundToPositiveInfinity_Vector64_Single();
var result = AdvSimd.ConvertToInt32RoundToPositiveInfinity(test._fld1);
Unsafe.Write(_dataTable.outArrayPtr, result);
ValidateResult(test._fld1, _dataTable.outArrayPtr);
}
public void RunClassLclFldScenario_Load()
{
TestLibrary.TestFramework.BeginScenario(nameof(RunClassLclFldScenario_Load));
var test = new SimpleUnaryOpTest__ConvertToInt32RoundToPositiveInfinity_Vector64_Single();
fixed (Vector64<Single>* pFld1 = &test._fld1)
{
var result = AdvSimd.ConvertToInt32RoundToPositiveInfinity(
AdvSimd.LoadVector64((Single*)(pFld1))
);
Unsafe.Write(_dataTable.outArrayPtr, result);
ValidateResult(test._fld1, _dataTable.outArrayPtr);
}
}
public void RunClassFldScenario()
{
TestLibrary.TestFramework.BeginScenario(nameof(RunClassFldScenario));
var result = AdvSimd.ConvertToInt32RoundToPositiveInfinity(_fld1);
Unsafe.Write(_dataTable.outArrayPtr, result);
ValidateResult(_fld1, _dataTable.outArrayPtr);
}
public void RunClassFldScenario_Load()
{
TestLibrary.TestFramework.BeginScenario(nameof(RunClassFldScenario_Load));
fixed (Vector64<Single>* pFld1 = &_fld1)
{
var result = AdvSimd.ConvertToInt32RoundToPositiveInfinity(
AdvSimd.LoadVector64((Single*)(pFld1))
);
Unsafe.Write(_dataTable.outArrayPtr, result);
ValidateResult(_fld1, _dataTable.outArrayPtr);
}
}
public void RunStructLclFldScenario()
{
TestLibrary.TestFramework.BeginScenario(nameof(RunStructLclFldScenario));
var test = TestStruct.Create();
var result = AdvSimd.ConvertToInt32RoundToPositiveInfinity(test._fld1);
Unsafe.Write(_dataTable.outArrayPtr, result);
ValidateResult(test._fld1, _dataTable.outArrayPtr);
}
public void RunStructLclFldScenario_Load()
{
TestLibrary.TestFramework.BeginScenario(nameof(RunStructLclFldScenario_Load));
var test = TestStruct.Create();
var result = AdvSimd.ConvertToInt32RoundToPositiveInfinity(
AdvSimd.LoadVector64((Single*)(&test._fld1))
);
Unsafe.Write(_dataTable.outArrayPtr, result);
ValidateResult(test._fld1, _dataTable.outArrayPtr);
}
public void RunStructFldScenario()
{
TestLibrary.TestFramework.BeginScenario(nameof(RunStructFldScenario));
var test = TestStruct.Create();
test.RunStructFldScenario(this);
}
public void RunStructFldScenario_Load()
{
TestLibrary.TestFramework.BeginScenario(nameof(RunStructFldScenario_Load));
var test = TestStruct.Create();
test.RunStructFldScenario_Load(this);
}
public void RunUnsupportedScenario()
{
TestLibrary.TestFramework.BeginScenario(nameof(RunUnsupportedScenario));
bool succeeded = false;
try
{
RunBasicScenario_UnsafeRead();
}
catch (PlatformNotSupportedException)
{
succeeded = true;
}
if (!succeeded)
{
Succeeded = false;
}
}
private void ValidateResult(Vector64<Single> op1, void* result, [CallerMemberName] string method = "")
{
Single[] inArray1 = new Single[Op1ElementCount];
Int32[] outArray = new Int32[RetElementCount];
Unsafe.WriteUnaligned(ref Unsafe.As<Single, byte>(ref inArray1[0]), op1);
Unsafe.CopyBlockUnaligned(ref Unsafe.As<Int32, byte>(ref outArray[0]), ref Unsafe.AsRef<byte>(result), (uint)Unsafe.SizeOf<Vector64<Int32>>());
ValidateResult(inArray1, outArray, method);
}
private void ValidateResult(void* op1, void* result, [CallerMemberName] string method = "")
{
Single[] inArray1 = new Single[Op1ElementCount];
Int32[] outArray = new Int32[RetElementCount];
Unsafe.CopyBlockUnaligned(ref Unsafe.As<Single, byte>(ref inArray1[0]), ref Unsafe.AsRef<byte>(op1), (uint)Unsafe.SizeOf<Vector64<Single>>());
Unsafe.CopyBlockUnaligned(ref Unsafe.As<Int32, byte>(ref outArray[0]), ref Unsafe.AsRef<byte>(result), (uint)Unsafe.SizeOf<Vector64<Int32>>());
ValidateResult(inArray1, outArray, method);
}
private void ValidateResult(Single[] firstOp, Int32[] result, [CallerMemberName] string method = "")
{
bool succeeded = true;
for (var i = 0; i < RetElementCount; i++)
{
if (Helpers.ConvertToInt32RoundToPositiveInfinity(firstOp[i]) != result[i])
{
succeeded = false;
break;
}
}
if (!succeeded)
{
TestLibrary.TestFramework.LogInformation($"{nameof(AdvSimd)}.{nameof(AdvSimd.ConvertToInt32RoundToPositiveInfinity)}<Int32>(Vector64<Single>): {method} failed:");
TestLibrary.TestFramework.LogInformation($" firstOp: ({string.Join(", ", firstOp)})");
TestLibrary.TestFramework.LogInformation($" result: ({string.Join(", ", result)})");
TestLibrary.TestFramework.LogInformation(string.Empty);
Succeeded = false;
}
}
}
}
| -1 |
dotnet/runtime | 66,248 | Fix issues related to JsonSerializerOptions mutation and caching. | Second attempt at merging https://github.com/dotnet/runtime/pull/65863. Original PR introduced test failures in netfx and was reverted in https://github.com/dotnet/runtime/pull/66235.
cc @jkotas | eiriktsarpalis | 2022-03-05T19:59:48Z | 2022-03-07T19:44:14Z | 44ec3c9474b3a93eb4f71791a43d8bb5c0b08a58 | 8b4eaf94140f488743d0c78caa3afec3b9c5d789 | Fix issues related to JsonSerializerOptions mutation and caching.. Second attempt at merging https://github.com/dotnet/runtime/pull/65863. Original PR introduced test failures in netfx and was reverted in https://github.com/dotnet/runtime/pull/66235.
cc @jkotas | ./src/libraries/System.Data.Common/src/System/Xml/TreeIterator.cs | // Licensed to the .NET Foundation under one or more agreements.
// The .NET Foundation licenses this file to you under the MIT license.
using System.Diagnostics;
#pragma warning disable 0618 // ignore obsolete warning about XmlDataDocument
namespace System.Xml
{
// Iterates over non-attribute nodes
internal sealed class TreeIterator : BaseTreeIterator
{
private readonly XmlNode _nodeTop;
private XmlNode _currentNode;
internal TreeIterator(XmlNode nodeTop) : base(((XmlDataDocument)(nodeTop.OwnerDocument!)).Mapper)
{
Debug.Assert(nodeTop != null);
_nodeTop = nodeTop;
_currentNode = nodeTop;
}
internal override XmlNode? CurrentNode => _currentNode;
internal override bool Next()
{
XmlNode? nextNode;
// Try to move to the first child
nextNode = _currentNode.FirstChild;
if (nextNode != null)
{
_currentNode = nextNode;
return true;
}
// No children, try next sibling
return NextRight();
}
internal override bool NextRight()
{
// Make sure we do not get past the nodeTop if we call NextRight on a just initialized iterator and nodeTop has no children
if (_currentNode == _nodeTop)
{
_currentNode = null!;
return false;
}
XmlNode? nextNode = _currentNode.NextSibling;
if (nextNode != null)
{
_currentNode = nextNode;
return true;
}
// No next sibling, walk up the parent chain to find the next sibling
nextNode = _currentNode;
while (nextNode != _nodeTop && nextNode!.NextSibling == null)
{
nextNode = nextNode.ParentNode;
}
if (nextNode == _nodeTop)
{
_currentNode = null!;
return false;
}
Debug.Assert(nextNode.NextSibling != null);
_currentNode = nextNode.NextSibling;
return true;
}
}
}
| // Licensed to the .NET Foundation under one or more agreements.
// The .NET Foundation licenses this file to you under the MIT license.
using System.Diagnostics;
#pragma warning disable 0618 // ignore obsolete warning about XmlDataDocument
namespace System.Xml
{
// Iterates over non-attribute nodes
internal sealed class TreeIterator : BaseTreeIterator
{
private readonly XmlNode _nodeTop;
private XmlNode _currentNode;
internal TreeIterator(XmlNode nodeTop) : base(((XmlDataDocument)(nodeTop.OwnerDocument!)).Mapper)
{
Debug.Assert(nodeTop != null);
_nodeTop = nodeTop;
_currentNode = nodeTop;
}
internal override XmlNode? CurrentNode => _currentNode;
internal override bool Next()
{
XmlNode? nextNode;
// Try to move to the first child
nextNode = _currentNode.FirstChild;
if (nextNode != null)
{
_currentNode = nextNode;
return true;
}
// No children, try next sibling
return NextRight();
}
internal override bool NextRight()
{
// Make sure we do not get past the nodeTop if we call NextRight on a just initialized iterator and nodeTop has no children
if (_currentNode == _nodeTop)
{
_currentNode = null!;
return false;
}
XmlNode? nextNode = _currentNode.NextSibling;
if (nextNode != null)
{
_currentNode = nextNode;
return true;
}
// No next sibling, walk up the parent chain to find the next sibling
nextNode = _currentNode;
while (nextNode != _nodeTop && nextNode!.NextSibling == null)
{
nextNode = nextNode.ParentNode;
}
if (nextNode == _nodeTop)
{
_currentNode = null!;
return false;
}
Debug.Assert(nextNode.NextSibling != null);
_currentNode = nextNode.NextSibling;
return true;
}
}
}
| -1 |
dotnet/runtime | 66,248 | Fix issues related to JsonSerializerOptions mutation and caching. | Second attempt at merging https://github.com/dotnet/runtime/pull/65863. Original PR introduced test failures in netfx and was reverted in https://github.com/dotnet/runtime/pull/66235.
cc @jkotas | eiriktsarpalis | 2022-03-05T19:59:48Z | 2022-03-07T19:44:14Z | 44ec3c9474b3a93eb4f71791a43d8bb5c0b08a58 | 8b4eaf94140f488743d0c78caa3afec3b9c5d789 | Fix issues related to JsonSerializerOptions mutation and caching.. Second attempt at merging https://github.com/dotnet/runtime/pull/65863. Original PR introduced test failures in netfx and was reverted in https://github.com/dotnet/runtime/pull/66235.
cc @jkotas | ./src/tests/JIT/HardwareIntrinsics/Arm/AdvSimd.Arm64/NegateSaturate.Vector128.Int64.cs | // Licensed to the .NET Foundation under one or more agreements.
// The .NET Foundation licenses this file to you under the MIT license.
/******************************************************************************
* This file is auto-generated from a template file by the GenerateTests.csx *
* script in tests\src\JIT\HardwareIntrinsics.Arm\Shared. In order to make *
* changes, please update the corresponding template and run according to the *
* directions listed in the file. *
******************************************************************************/
using System;
using System.Runtime.CompilerServices;
using System.Runtime.InteropServices;
using System.Runtime.Intrinsics;
using System.Runtime.Intrinsics.Arm;
namespace JIT.HardwareIntrinsics.Arm
{
public static partial class Program
{
private static void NegateSaturate_Vector128_Int64()
{
var test = new SimpleUnaryOpTest__NegateSaturate_Vector128_Int64();
if (test.IsSupported)
{
// Validates basic functionality works, using Unsafe.Read
test.RunBasicScenario_UnsafeRead();
if (AdvSimd.IsSupported)
{
// Validates basic functionality works, using Load
test.RunBasicScenario_Load();
}
// Validates calling via reflection works, using Unsafe.Read
test.RunReflectionScenario_UnsafeRead();
if (AdvSimd.IsSupported)
{
// Validates calling via reflection works, using Load
test.RunReflectionScenario_Load();
}
// Validates passing a static member works
test.RunClsVarScenario();
if (AdvSimd.IsSupported)
{
// Validates passing a static member works, using pinning and Load
test.RunClsVarScenario_Load();
}
// Validates passing a local works, using Unsafe.Read
test.RunLclVarScenario_UnsafeRead();
if (AdvSimd.IsSupported)
{
// Validates passing a local works, using Load
test.RunLclVarScenario_Load();
}
// Validates passing the field of a local class works
test.RunClassLclFldScenario();
if (AdvSimd.IsSupported)
{
// Validates passing the field of a local class works, using pinning and Load
test.RunClassLclFldScenario_Load();
}
// Validates passing an instance member of a class works
test.RunClassFldScenario();
if (AdvSimd.IsSupported)
{
// Validates passing an instance member of a class works, using pinning and Load
test.RunClassFldScenario_Load();
}
// Validates passing the field of a local struct works
test.RunStructLclFldScenario();
if (AdvSimd.IsSupported)
{
// Validates passing the field of a local struct works, using pinning and Load
test.RunStructLclFldScenario_Load();
}
// Validates passing an instance member of a struct works
test.RunStructFldScenario();
if (AdvSimd.IsSupported)
{
// Validates passing an instance member of a struct works, using pinning and Load
test.RunStructFldScenario_Load();
}
}
else
{
// Validates we throw on unsupported hardware
test.RunUnsupportedScenario();
}
if (!test.Succeeded)
{
throw new Exception("One or more scenarios did not complete as expected.");
}
}
}
public sealed unsafe class SimpleUnaryOpTest__NegateSaturate_Vector128_Int64
{
private struct DataTable
{
private byte[] inArray1;
private byte[] outArray;
private GCHandle inHandle1;
private GCHandle outHandle;
private ulong alignment;
public DataTable(Int64[] inArray1, Int64[] outArray, int alignment)
{
int sizeOfinArray1 = inArray1.Length * Unsafe.SizeOf<Int64>();
int sizeOfoutArray = outArray.Length * Unsafe.SizeOf<Int64>();
if ((alignment != 16 && alignment != 8) || (alignment * 2) < sizeOfinArray1 || (alignment * 2) < sizeOfoutArray)
{
throw new ArgumentException("Invalid value of alignment");
}
this.inArray1 = new byte[alignment * 2];
this.outArray = new byte[alignment * 2];
this.inHandle1 = GCHandle.Alloc(this.inArray1, GCHandleType.Pinned);
this.outHandle = GCHandle.Alloc(this.outArray, GCHandleType.Pinned);
this.alignment = (ulong)alignment;
Unsafe.CopyBlockUnaligned(ref Unsafe.AsRef<byte>(inArray1Ptr), ref Unsafe.As<Int64, byte>(ref inArray1[0]), (uint)sizeOfinArray1);
}
public void* inArray1Ptr => Align((byte*)(inHandle1.AddrOfPinnedObject().ToPointer()), alignment);
public void* outArrayPtr => Align((byte*)(outHandle.AddrOfPinnedObject().ToPointer()), alignment);
public void Dispose()
{
inHandle1.Free();
outHandle.Free();
}
private static unsafe void* Align(byte* buffer, ulong expectedAlignment)
{
return (void*)(((ulong)buffer + expectedAlignment - 1) & ~(expectedAlignment - 1));
}
}
private struct TestStruct
{
public Vector128<Int64> _fld1;
public static TestStruct Create()
{
var testStruct = new TestStruct();
for (var i = 0; i < Op1ElementCount; i++) { _data1[i] = Int64.MinValue; }
Unsafe.CopyBlockUnaligned(ref Unsafe.As<Vector128<Int64>, byte>(ref testStruct._fld1), ref Unsafe.As<Int64, byte>(ref _data1[0]), (uint)Unsafe.SizeOf<Vector128<Int64>>());
return testStruct;
}
public void RunStructFldScenario(SimpleUnaryOpTest__NegateSaturate_Vector128_Int64 testClass)
{
var result = AdvSimd.Arm64.NegateSaturate(_fld1);
Unsafe.Write(testClass._dataTable.outArrayPtr, result);
testClass.ValidateResult(_fld1, testClass._dataTable.outArrayPtr);
}
public void RunStructFldScenario_Load(SimpleUnaryOpTest__NegateSaturate_Vector128_Int64 testClass)
{
fixed (Vector128<Int64>* pFld1 = &_fld1)
{
var result = AdvSimd.Arm64.NegateSaturate(
AdvSimd.LoadVector128((Int64*)(pFld1))
);
Unsafe.Write(testClass._dataTable.outArrayPtr, result);
testClass.ValidateResult(_fld1, testClass._dataTable.outArrayPtr);
}
}
}
private static readonly int LargestVectorSize = 16;
private static readonly int Op1ElementCount = Unsafe.SizeOf<Vector128<Int64>>() / sizeof(Int64);
private static readonly int RetElementCount = Unsafe.SizeOf<Vector128<Int64>>() / sizeof(Int64);
private static Int64[] _data1 = new Int64[Op1ElementCount];
private static Vector128<Int64> _clsVar1;
private Vector128<Int64> _fld1;
private DataTable _dataTable;
static SimpleUnaryOpTest__NegateSaturate_Vector128_Int64()
{
for (var i = 0; i < Op1ElementCount; i++) { _data1[i] = Int64.MinValue; }
Unsafe.CopyBlockUnaligned(ref Unsafe.As<Vector128<Int64>, byte>(ref _clsVar1), ref Unsafe.As<Int64, byte>(ref _data1[0]), (uint)Unsafe.SizeOf<Vector128<Int64>>());
}
public SimpleUnaryOpTest__NegateSaturate_Vector128_Int64()
{
Succeeded = true;
for (var i = 0; i < Op1ElementCount; i++) { _data1[i] = Int64.MinValue; }
Unsafe.CopyBlockUnaligned(ref Unsafe.As<Vector128<Int64>, byte>(ref _fld1), ref Unsafe.As<Int64, byte>(ref _data1[0]), (uint)Unsafe.SizeOf<Vector128<Int64>>());
for (var i = 0; i < Op1ElementCount; i++) { _data1[i] = Int64.MinValue; }
_dataTable = new DataTable(_data1, new Int64[RetElementCount], LargestVectorSize);
}
public bool IsSupported => AdvSimd.Arm64.IsSupported;
public bool Succeeded { get; set; }
public void RunBasicScenario_UnsafeRead()
{
TestLibrary.TestFramework.BeginScenario(nameof(RunBasicScenario_UnsafeRead));
var result = AdvSimd.Arm64.NegateSaturate(
Unsafe.Read<Vector128<Int64>>(_dataTable.inArray1Ptr)
);
Unsafe.Write(_dataTable.outArrayPtr, result);
ValidateResult(_dataTable.inArray1Ptr, _dataTable.outArrayPtr);
}
public void RunBasicScenario_Load()
{
TestLibrary.TestFramework.BeginScenario(nameof(RunBasicScenario_Load));
var result = AdvSimd.Arm64.NegateSaturate(
AdvSimd.LoadVector128((Int64*)(_dataTable.inArray1Ptr))
);
Unsafe.Write(_dataTable.outArrayPtr, result);
ValidateResult(_dataTable.inArray1Ptr, _dataTable.outArrayPtr);
}
public void RunReflectionScenario_UnsafeRead()
{
TestLibrary.TestFramework.BeginScenario(nameof(RunReflectionScenario_UnsafeRead));
var result = typeof(AdvSimd.Arm64).GetMethod(nameof(AdvSimd.Arm64.NegateSaturate), new Type[] { typeof(Vector128<Int64>) })
.Invoke(null, new object[] {
Unsafe.Read<Vector128<Int64>>(_dataTable.inArray1Ptr)
});
Unsafe.Write(_dataTable.outArrayPtr, (Vector128<Int64>)(result));
ValidateResult(_dataTable.inArray1Ptr, _dataTable.outArrayPtr);
}
public void RunReflectionScenario_Load()
{
TestLibrary.TestFramework.BeginScenario(nameof(RunReflectionScenario_Load));
var result = typeof(AdvSimd.Arm64).GetMethod(nameof(AdvSimd.Arm64.NegateSaturate), new Type[] { typeof(Vector128<Int64>) })
.Invoke(null, new object[] {
AdvSimd.LoadVector128((Int64*)(_dataTable.inArray1Ptr))
});
Unsafe.Write(_dataTable.outArrayPtr, (Vector128<Int64>)(result));
ValidateResult(_dataTable.inArray1Ptr, _dataTable.outArrayPtr);
}
public void RunClsVarScenario()
{
TestLibrary.TestFramework.BeginScenario(nameof(RunClsVarScenario));
var result = AdvSimd.Arm64.NegateSaturate(
_clsVar1
);
Unsafe.Write(_dataTable.outArrayPtr, result);
ValidateResult(_clsVar1, _dataTable.outArrayPtr);
}
public void RunClsVarScenario_Load()
{
TestLibrary.TestFramework.BeginScenario(nameof(RunClsVarScenario_Load));
fixed (Vector128<Int64>* pClsVar1 = &_clsVar1)
{
var result = AdvSimd.Arm64.NegateSaturate(
AdvSimd.LoadVector128((Int64*)(pClsVar1))
);
Unsafe.Write(_dataTable.outArrayPtr, result);
ValidateResult(_clsVar1, _dataTable.outArrayPtr);
}
}
public void RunLclVarScenario_UnsafeRead()
{
TestLibrary.TestFramework.BeginScenario(nameof(RunLclVarScenario_UnsafeRead));
var op1 = Unsafe.Read<Vector128<Int64>>(_dataTable.inArray1Ptr);
var result = AdvSimd.Arm64.NegateSaturate(op1);
Unsafe.Write(_dataTable.outArrayPtr, result);
ValidateResult(op1, _dataTable.outArrayPtr);
}
public void RunLclVarScenario_Load()
{
TestLibrary.TestFramework.BeginScenario(nameof(RunLclVarScenario_Load));
var op1 = AdvSimd.LoadVector128((Int64*)(_dataTable.inArray1Ptr));
var result = AdvSimd.Arm64.NegateSaturate(op1);
Unsafe.Write(_dataTable.outArrayPtr, result);
ValidateResult(op1, _dataTable.outArrayPtr);
}
public void RunClassLclFldScenario()
{
TestLibrary.TestFramework.BeginScenario(nameof(RunClassLclFldScenario));
var test = new SimpleUnaryOpTest__NegateSaturate_Vector128_Int64();
var result = AdvSimd.Arm64.NegateSaturate(test._fld1);
Unsafe.Write(_dataTable.outArrayPtr, result);
ValidateResult(test._fld1, _dataTable.outArrayPtr);
}
public void RunClassLclFldScenario_Load()
{
TestLibrary.TestFramework.BeginScenario(nameof(RunClassLclFldScenario_Load));
var test = new SimpleUnaryOpTest__NegateSaturate_Vector128_Int64();
fixed (Vector128<Int64>* pFld1 = &test._fld1)
{
var result = AdvSimd.Arm64.NegateSaturate(
AdvSimd.LoadVector128((Int64*)(pFld1))
);
Unsafe.Write(_dataTable.outArrayPtr, result);
ValidateResult(test._fld1, _dataTable.outArrayPtr);
}
}
public void RunClassFldScenario()
{
TestLibrary.TestFramework.BeginScenario(nameof(RunClassFldScenario));
var result = AdvSimd.Arm64.NegateSaturate(_fld1);
Unsafe.Write(_dataTable.outArrayPtr, result);
ValidateResult(_fld1, _dataTable.outArrayPtr);
}
public void RunClassFldScenario_Load()
{
TestLibrary.TestFramework.BeginScenario(nameof(RunClassFldScenario_Load));
fixed (Vector128<Int64>* pFld1 = &_fld1)
{
var result = AdvSimd.Arm64.NegateSaturate(
AdvSimd.LoadVector128((Int64*)(pFld1))
);
Unsafe.Write(_dataTable.outArrayPtr, result);
ValidateResult(_fld1, _dataTable.outArrayPtr);
}
}
public void RunStructLclFldScenario()
{
TestLibrary.TestFramework.BeginScenario(nameof(RunStructLclFldScenario));
var test = TestStruct.Create();
var result = AdvSimd.Arm64.NegateSaturate(test._fld1);
Unsafe.Write(_dataTable.outArrayPtr, result);
ValidateResult(test._fld1, _dataTable.outArrayPtr);
}
public void RunStructLclFldScenario_Load()
{
TestLibrary.TestFramework.BeginScenario(nameof(RunStructLclFldScenario_Load));
var test = TestStruct.Create();
var result = AdvSimd.Arm64.NegateSaturate(
AdvSimd.LoadVector128((Int64*)(&test._fld1))
);
Unsafe.Write(_dataTable.outArrayPtr, result);
ValidateResult(test._fld1, _dataTable.outArrayPtr);
}
public void RunStructFldScenario()
{
TestLibrary.TestFramework.BeginScenario(nameof(RunStructFldScenario));
var test = TestStruct.Create();
test.RunStructFldScenario(this);
}
public void RunStructFldScenario_Load()
{
TestLibrary.TestFramework.BeginScenario(nameof(RunStructFldScenario_Load));
var test = TestStruct.Create();
test.RunStructFldScenario_Load(this);
}
public void RunUnsupportedScenario()
{
TestLibrary.TestFramework.BeginScenario(nameof(RunUnsupportedScenario));
bool succeeded = false;
try
{
RunBasicScenario_UnsafeRead();
}
catch (PlatformNotSupportedException)
{
succeeded = true;
}
if (!succeeded)
{
Succeeded = false;
}
}
private void ValidateResult(Vector128<Int64> op1, void* result, [CallerMemberName] string method = "")
{
Int64[] inArray1 = new Int64[Op1ElementCount];
Int64[] outArray = new Int64[RetElementCount];
Unsafe.WriteUnaligned(ref Unsafe.As<Int64, byte>(ref inArray1[0]), op1);
Unsafe.CopyBlockUnaligned(ref Unsafe.As<Int64, byte>(ref outArray[0]), ref Unsafe.AsRef<byte>(result), (uint)Unsafe.SizeOf<Vector128<Int64>>());
ValidateResult(inArray1, outArray, method);
}
private void ValidateResult(void* op1, void* result, [CallerMemberName] string method = "")
{
Int64[] inArray1 = new Int64[Op1ElementCount];
Int64[] outArray = new Int64[RetElementCount];
Unsafe.CopyBlockUnaligned(ref Unsafe.As<Int64, byte>(ref inArray1[0]), ref Unsafe.AsRef<byte>(op1), (uint)Unsafe.SizeOf<Vector128<Int64>>());
Unsafe.CopyBlockUnaligned(ref Unsafe.As<Int64, byte>(ref outArray[0]), ref Unsafe.AsRef<byte>(result), (uint)Unsafe.SizeOf<Vector128<Int64>>());
ValidateResult(inArray1, outArray, method);
}
private void ValidateResult(Int64[] firstOp, Int64[] result, [CallerMemberName] string method = "")
{
bool succeeded = true;
for (var i = 0; i < RetElementCount; i++)
{
if (Helpers.NegateSaturate(firstOp[i]) != result[i])
{
succeeded = false;
break;
}
}
if (!succeeded)
{
TestLibrary.TestFramework.LogInformation($"{nameof(AdvSimd.Arm64)}.{nameof(AdvSimd.Arm64.NegateSaturate)}<Int64>(Vector128<Int64>): {method} failed:");
TestLibrary.TestFramework.LogInformation($" firstOp: ({string.Join(", ", firstOp)})");
TestLibrary.TestFramework.LogInformation($" result: ({string.Join(", ", result)})");
TestLibrary.TestFramework.LogInformation(string.Empty);
Succeeded = false;
}
}
}
}
| // Licensed to the .NET Foundation under one or more agreements.
// The .NET Foundation licenses this file to you under the MIT license.
/******************************************************************************
* This file is auto-generated from a template file by the GenerateTests.csx *
* script in tests\src\JIT\HardwareIntrinsics.Arm\Shared. In order to make *
* changes, please update the corresponding template and run according to the *
* directions listed in the file. *
******************************************************************************/
using System;
using System.Runtime.CompilerServices;
using System.Runtime.InteropServices;
using System.Runtime.Intrinsics;
using System.Runtime.Intrinsics.Arm;
namespace JIT.HardwareIntrinsics.Arm
{
public static partial class Program
{
private static void NegateSaturate_Vector128_Int64()
{
var test = new SimpleUnaryOpTest__NegateSaturate_Vector128_Int64();
if (test.IsSupported)
{
// Validates basic functionality works, using Unsafe.Read
test.RunBasicScenario_UnsafeRead();
if (AdvSimd.IsSupported)
{
// Validates basic functionality works, using Load
test.RunBasicScenario_Load();
}
// Validates calling via reflection works, using Unsafe.Read
test.RunReflectionScenario_UnsafeRead();
if (AdvSimd.IsSupported)
{
// Validates calling via reflection works, using Load
test.RunReflectionScenario_Load();
}
// Validates passing a static member works
test.RunClsVarScenario();
if (AdvSimd.IsSupported)
{
// Validates passing a static member works, using pinning and Load
test.RunClsVarScenario_Load();
}
// Validates passing a local works, using Unsafe.Read
test.RunLclVarScenario_UnsafeRead();
if (AdvSimd.IsSupported)
{
// Validates passing a local works, using Load
test.RunLclVarScenario_Load();
}
// Validates passing the field of a local class works
test.RunClassLclFldScenario();
if (AdvSimd.IsSupported)
{
// Validates passing the field of a local class works, using pinning and Load
test.RunClassLclFldScenario_Load();
}
// Validates passing an instance member of a class works
test.RunClassFldScenario();
if (AdvSimd.IsSupported)
{
// Validates passing an instance member of a class works, using pinning and Load
test.RunClassFldScenario_Load();
}
// Validates passing the field of a local struct works
test.RunStructLclFldScenario();
if (AdvSimd.IsSupported)
{
// Validates passing the field of a local struct works, using pinning and Load
test.RunStructLclFldScenario_Load();
}
// Validates passing an instance member of a struct works
test.RunStructFldScenario();
if (AdvSimd.IsSupported)
{
// Validates passing an instance member of a struct works, using pinning and Load
test.RunStructFldScenario_Load();
}
}
else
{
// Validates we throw on unsupported hardware
test.RunUnsupportedScenario();
}
if (!test.Succeeded)
{
throw new Exception("One or more scenarios did not complete as expected.");
}
}
}
public sealed unsafe class SimpleUnaryOpTest__NegateSaturate_Vector128_Int64
{
private struct DataTable
{
private byte[] inArray1;
private byte[] outArray;
private GCHandle inHandle1;
private GCHandle outHandle;
private ulong alignment;
public DataTable(Int64[] inArray1, Int64[] outArray, int alignment)
{
int sizeOfinArray1 = inArray1.Length * Unsafe.SizeOf<Int64>();
int sizeOfoutArray = outArray.Length * Unsafe.SizeOf<Int64>();
if ((alignment != 16 && alignment != 8) || (alignment * 2) < sizeOfinArray1 || (alignment * 2) < sizeOfoutArray)
{
throw new ArgumentException("Invalid value of alignment");
}
this.inArray1 = new byte[alignment * 2];
this.outArray = new byte[alignment * 2];
this.inHandle1 = GCHandle.Alloc(this.inArray1, GCHandleType.Pinned);
this.outHandle = GCHandle.Alloc(this.outArray, GCHandleType.Pinned);
this.alignment = (ulong)alignment;
Unsafe.CopyBlockUnaligned(ref Unsafe.AsRef<byte>(inArray1Ptr), ref Unsafe.As<Int64, byte>(ref inArray1[0]), (uint)sizeOfinArray1);
}
public void* inArray1Ptr => Align((byte*)(inHandle1.AddrOfPinnedObject().ToPointer()), alignment);
public void* outArrayPtr => Align((byte*)(outHandle.AddrOfPinnedObject().ToPointer()), alignment);
public void Dispose()
{
inHandle1.Free();
outHandle.Free();
}
private static unsafe void* Align(byte* buffer, ulong expectedAlignment)
{
return (void*)(((ulong)buffer + expectedAlignment - 1) & ~(expectedAlignment - 1));
}
}
private struct TestStruct
{
public Vector128<Int64> _fld1;
public static TestStruct Create()
{
var testStruct = new TestStruct();
for (var i = 0; i < Op1ElementCount; i++) { _data1[i] = Int64.MinValue; }
Unsafe.CopyBlockUnaligned(ref Unsafe.As<Vector128<Int64>, byte>(ref testStruct._fld1), ref Unsafe.As<Int64, byte>(ref _data1[0]), (uint)Unsafe.SizeOf<Vector128<Int64>>());
return testStruct;
}
public void RunStructFldScenario(SimpleUnaryOpTest__NegateSaturate_Vector128_Int64 testClass)
{
var result = AdvSimd.Arm64.NegateSaturate(_fld1);
Unsafe.Write(testClass._dataTable.outArrayPtr, result);
testClass.ValidateResult(_fld1, testClass._dataTable.outArrayPtr);
}
public void RunStructFldScenario_Load(SimpleUnaryOpTest__NegateSaturate_Vector128_Int64 testClass)
{
fixed (Vector128<Int64>* pFld1 = &_fld1)
{
var result = AdvSimd.Arm64.NegateSaturate(
AdvSimd.LoadVector128((Int64*)(pFld1))
);
Unsafe.Write(testClass._dataTable.outArrayPtr, result);
testClass.ValidateResult(_fld1, testClass._dataTable.outArrayPtr);
}
}
}
private static readonly int LargestVectorSize = 16;
private static readonly int Op1ElementCount = Unsafe.SizeOf<Vector128<Int64>>() / sizeof(Int64);
private static readonly int RetElementCount = Unsafe.SizeOf<Vector128<Int64>>() / sizeof(Int64);
private static Int64[] _data1 = new Int64[Op1ElementCount];
private static Vector128<Int64> _clsVar1;
private Vector128<Int64> _fld1;
private DataTable _dataTable;
static SimpleUnaryOpTest__NegateSaturate_Vector128_Int64()
{
for (var i = 0; i < Op1ElementCount; i++) { _data1[i] = Int64.MinValue; }
Unsafe.CopyBlockUnaligned(ref Unsafe.As<Vector128<Int64>, byte>(ref _clsVar1), ref Unsafe.As<Int64, byte>(ref _data1[0]), (uint)Unsafe.SizeOf<Vector128<Int64>>());
}
public SimpleUnaryOpTest__NegateSaturate_Vector128_Int64()
{
Succeeded = true;
for (var i = 0; i < Op1ElementCount; i++) { _data1[i] = Int64.MinValue; }
Unsafe.CopyBlockUnaligned(ref Unsafe.As<Vector128<Int64>, byte>(ref _fld1), ref Unsafe.As<Int64, byte>(ref _data1[0]), (uint)Unsafe.SizeOf<Vector128<Int64>>());
for (var i = 0; i < Op1ElementCount; i++) { _data1[i] = Int64.MinValue; }
_dataTable = new DataTable(_data1, new Int64[RetElementCount], LargestVectorSize);
}
public bool IsSupported => AdvSimd.Arm64.IsSupported;
public bool Succeeded { get; set; }
public void RunBasicScenario_UnsafeRead()
{
TestLibrary.TestFramework.BeginScenario(nameof(RunBasicScenario_UnsafeRead));
var result = AdvSimd.Arm64.NegateSaturate(
Unsafe.Read<Vector128<Int64>>(_dataTable.inArray1Ptr)
);
Unsafe.Write(_dataTable.outArrayPtr, result);
ValidateResult(_dataTable.inArray1Ptr, _dataTable.outArrayPtr);
}
public void RunBasicScenario_Load()
{
TestLibrary.TestFramework.BeginScenario(nameof(RunBasicScenario_Load));
var result = AdvSimd.Arm64.NegateSaturate(
AdvSimd.LoadVector128((Int64*)(_dataTable.inArray1Ptr))
);
Unsafe.Write(_dataTable.outArrayPtr, result);
ValidateResult(_dataTable.inArray1Ptr, _dataTable.outArrayPtr);
}
public void RunReflectionScenario_UnsafeRead()
{
TestLibrary.TestFramework.BeginScenario(nameof(RunReflectionScenario_UnsafeRead));
var result = typeof(AdvSimd.Arm64).GetMethod(nameof(AdvSimd.Arm64.NegateSaturate), new Type[] { typeof(Vector128<Int64>) })
.Invoke(null, new object[] {
Unsafe.Read<Vector128<Int64>>(_dataTable.inArray1Ptr)
});
Unsafe.Write(_dataTable.outArrayPtr, (Vector128<Int64>)(result));
ValidateResult(_dataTable.inArray1Ptr, _dataTable.outArrayPtr);
}
public void RunReflectionScenario_Load()
{
TestLibrary.TestFramework.BeginScenario(nameof(RunReflectionScenario_Load));
var result = typeof(AdvSimd.Arm64).GetMethod(nameof(AdvSimd.Arm64.NegateSaturate), new Type[] { typeof(Vector128<Int64>) })
.Invoke(null, new object[] {
AdvSimd.LoadVector128((Int64*)(_dataTable.inArray1Ptr))
});
Unsafe.Write(_dataTable.outArrayPtr, (Vector128<Int64>)(result));
ValidateResult(_dataTable.inArray1Ptr, _dataTable.outArrayPtr);
}
public void RunClsVarScenario()
{
TestLibrary.TestFramework.BeginScenario(nameof(RunClsVarScenario));
var result = AdvSimd.Arm64.NegateSaturate(
_clsVar1
);
Unsafe.Write(_dataTable.outArrayPtr, result);
ValidateResult(_clsVar1, _dataTable.outArrayPtr);
}
public void RunClsVarScenario_Load()
{
TestLibrary.TestFramework.BeginScenario(nameof(RunClsVarScenario_Load));
fixed (Vector128<Int64>* pClsVar1 = &_clsVar1)
{
var result = AdvSimd.Arm64.NegateSaturate(
AdvSimd.LoadVector128((Int64*)(pClsVar1))
);
Unsafe.Write(_dataTable.outArrayPtr, result);
ValidateResult(_clsVar1, _dataTable.outArrayPtr);
}
}
public void RunLclVarScenario_UnsafeRead()
{
TestLibrary.TestFramework.BeginScenario(nameof(RunLclVarScenario_UnsafeRead));
var op1 = Unsafe.Read<Vector128<Int64>>(_dataTable.inArray1Ptr);
var result = AdvSimd.Arm64.NegateSaturate(op1);
Unsafe.Write(_dataTable.outArrayPtr, result);
ValidateResult(op1, _dataTable.outArrayPtr);
}
public void RunLclVarScenario_Load()
{
TestLibrary.TestFramework.BeginScenario(nameof(RunLclVarScenario_Load));
var op1 = AdvSimd.LoadVector128((Int64*)(_dataTable.inArray1Ptr));
var result = AdvSimd.Arm64.NegateSaturate(op1);
Unsafe.Write(_dataTable.outArrayPtr, result);
ValidateResult(op1, _dataTable.outArrayPtr);
}
public void RunClassLclFldScenario()
{
TestLibrary.TestFramework.BeginScenario(nameof(RunClassLclFldScenario));
var test = new SimpleUnaryOpTest__NegateSaturate_Vector128_Int64();
var result = AdvSimd.Arm64.NegateSaturate(test._fld1);
Unsafe.Write(_dataTable.outArrayPtr, result);
ValidateResult(test._fld1, _dataTable.outArrayPtr);
}
public void RunClassLclFldScenario_Load()
{
TestLibrary.TestFramework.BeginScenario(nameof(RunClassLclFldScenario_Load));
var test = new SimpleUnaryOpTest__NegateSaturate_Vector128_Int64();
fixed (Vector128<Int64>* pFld1 = &test._fld1)
{
var result = AdvSimd.Arm64.NegateSaturate(
AdvSimd.LoadVector128((Int64*)(pFld1))
);
Unsafe.Write(_dataTable.outArrayPtr, result);
ValidateResult(test._fld1, _dataTable.outArrayPtr);
}
}
public void RunClassFldScenario()
{
TestLibrary.TestFramework.BeginScenario(nameof(RunClassFldScenario));
var result = AdvSimd.Arm64.NegateSaturate(_fld1);
Unsafe.Write(_dataTable.outArrayPtr, result);
ValidateResult(_fld1, _dataTable.outArrayPtr);
}
public void RunClassFldScenario_Load()
{
TestLibrary.TestFramework.BeginScenario(nameof(RunClassFldScenario_Load));
fixed (Vector128<Int64>* pFld1 = &_fld1)
{
var result = AdvSimd.Arm64.NegateSaturate(
AdvSimd.LoadVector128((Int64*)(pFld1))
);
Unsafe.Write(_dataTable.outArrayPtr, result);
ValidateResult(_fld1, _dataTable.outArrayPtr);
}
}
public void RunStructLclFldScenario()
{
TestLibrary.TestFramework.BeginScenario(nameof(RunStructLclFldScenario));
var test = TestStruct.Create();
var result = AdvSimd.Arm64.NegateSaturate(test._fld1);
Unsafe.Write(_dataTable.outArrayPtr, result);
ValidateResult(test._fld1, _dataTable.outArrayPtr);
}
public void RunStructLclFldScenario_Load()
{
TestLibrary.TestFramework.BeginScenario(nameof(RunStructLclFldScenario_Load));
var test = TestStruct.Create();
var result = AdvSimd.Arm64.NegateSaturate(
AdvSimd.LoadVector128((Int64*)(&test._fld1))
);
Unsafe.Write(_dataTable.outArrayPtr, result);
ValidateResult(test._fld1, _dataTable.outArrayPtr);
}
public void RunStructFldScenario()
{
TestLibrary.TestFramework.BeginScenario(nameof(RunStructFldScenario));
var test = TestStruct.Create();
test.RunStructFldScenario(this);
}
public void RunStructFldScenario_Load()
{
TestLibrary.TestFramework.BeginScenario(nameof(RunStructFldScenario_Load));
var test = TestStruct.Create();
test.RunStructFldScenario_Load(this);
}
public void RunUnsupportedScenario()
{
TestLibrary.TestFramework.BeginScenario(nameof(RunUnsupportedScenario));
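// On hardware without AdvSimd.Arm64 support the intrinsic is expected to throw PlatformNotSupportedException;
// the scenario only succeeds if that exception is observed.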
bool succeeded = false;
try
{
RunBasicScenario_UnsafeRead();
}
catch (PlatformNotSupportedException)
{
succeeded = true;
}
if (!succeeded)
{
Succeeded = false;
}
}
private void ValidateResult(Vector128<Int64> op1, void* result, [CallerMemberName] string method = "")
{
Int64[] inArray1 = new Int64[Op1ElementCount];
Int64[] outArray = new Int64[RetElementCount];
Unsafe.WriteUnaligned(ref Unsafe.As<Int64, byte>(ref inArray1[0]), op1);
Unsafe.CopyBlockUnaligned(ref Unsafe.As<Int64, byte>(ref outArray[0]), ref Unsafe.AsRef<byte>(result), (uint)Unsafe.SizeOf<Vector128<Int64>>());
ValidateResult(inArray1, outArray, method);
}
private void ValidateResult(void* op1, void* result, [CallerMemberName] string method = "")
{
Int64[] inArray1 = new Int64[Op1ElementCount];
Int64[] outArray = new Int64[RetElementCount];
Unsafe.CopyBlockUnaligned(ref Unsafe.As<Int64, byte>(ref inArray1[0]), ref Unsafe.AsRef<byte>(op1), (uint)Unsafe.SizeOf<Vector128<Int64>>());
Unsafe.CopyBlockUnaligned(ref Unsafe.As<Int64, byte>(ref outArray[0]), ref Unsafe.AsRef<byte>(result), (uint)Unsafe.SizeOf<Vector128<Int64>>());
ValidateResult(inArray1, outArray, method);
}
private void ValidateResult(Int64[] firstOp, Int64[] result, [CallerMemberName] string method = "")
{
bool succeeded = true;
for (var i = 0; i < RetElementCount; i++)
{
if (Helpers.NegateSaturate(firstOp[i]) != result[i])
{
succeeded = false;
break;
}
}
if (!succeeded)
{
TestLibrary.TestFramework.LogInformation($"{nameof(AdvSimd.Arm64)}.{nameof(AdvSimd.Arm64.NegateSaturate)}<Int64>(Vector128<Int64>): {method} failed:");
TestLibrary.TestFramework.LogInformation($" firstOp: ({string.Join(", ", firstOp)})");
TestLibrary.TestFramework.LogInformation($" result: ({string.Join(", ", result)})");
TestLibrary.TestFramework.LogInformation(string.Empty);
Succeeded = false;
}
}
}
}
| -1 |
dotnet/runtime | 66,248 | Fix issues related to JsonSerializerOptions mutation and caching. | Second attempt at merging https://github.com/dotnet/runtime/pull/65863. Original PR introduced test failures in netfx and was reverted in https://github.com/dotnet/runtime/pull/66235.
cc @jkotas | eiriktsarpalis | 2022-03-05T19:59:48Z | 2022-03-07T19:44:14Z | 44ec3c9474b3a93eb4f71791a43d8bb5c0b08a58 | 8b4eaf94140f488743d0c78caa3afec3b9c5d789 | Fix issues related to JsonSerializerOptions mutation and caching.. Second attempt at merging https://github.com/dotnet/runtime/pull/65863. Original PR introduced test failures in netfx and was reverted in https://github.com/dotnet/runtime/pull/66235.
cc @jkotas | ./src/tests/JIT/Regression/CLR-x86-JIT/V1-M09.5-PDC/b28776/b28776.cs | // Licensed to the .NET Foundation under one or more agreements.
// The .NET Foundation licenses this file to you under the MIT license.
//
namespace Test
{
using System;
class AA
{
public int[] m_anField1 = (new int[7]);
public bool[] m_abField2 = (new bool[7]);
public float[] m_afField3 = (new float[7]);
public static void Method1()
{
object local1 = 0.0;
AA[] local2 = new AA[7];
while (true)
{
local2[2].m_anField1 = new AA().m_anField1;
AA a = new AA();
while (a.m_abField2[2]) { }
while ((double)a.m_afField3[2] >= (double)local1) { }
}
}
static int Main()
{
try
{
Console.WriteLine("Testing AA::Method1");
AA.Method1();
}
catch (Exception)
{
Console.WriteLine("Exception handled.");
}
return 100;
}
}
}
/*
---------------------------
Assert Failure (PID 1204, Thread 1056/420)
---------------------------
pPrevNestedInfo
.\excep.cpp, Line: 2322
---------------------------
COMPlusNestedExceptionHandler(_EXCEPTION_RECORD * 0x0012e574, _EXCEPTION_REGISTRATION_RECORD * 0x0012f430, _CONTEXT * 0x0012e2a8, void * 0x0012e61c) line 2322 + 31 bytes
NTDLL! 77f92538()
NTDLL! 77f861f8()
KERNEL32! 77ea13f5()
KERNEL32! 77ea146c()
NTDLL! 77f92538()
NTDLL! 77f8af89()
NTDLL! 77f9fb9a()
EEJitManager::ResumeAtJitEH(CrawlFrame * 0x0012ee84, unsigned long 36, unsigned long 0, Thread * 0x00135120, int 1) line 845
COMPlusUnwindCallback(CrawlFrame * 0x0012ee84, ThrowCallbackType * 0x0012f3b0) line 1918 + 39 bytes
Thread::StackWalkFramesEx(_REGDISPLAY * 0x0012efe4, StackWalkAction (CrawlFrame *, void *)* 0x6d0f6fd0 COMPlusUnwindCallback(CrawlFrame *, ThrowCallbackType *), void * 0x0012f3b0, unsigned int 3, Frame * 0x0012f3fc) line 206 + 16 bytes
Thread::StackWalkFrames(StackWalkAction (CrawlFrame *, void *)* 0x6d0f6fd0 COMPlusUnwindCallback(CrawlFrame *, ThrowCallbackType *), void * 0x0012f3b0, unsigned int 3, Frame * 0x0012f3fc) line 457 + 34 bytes
UnwindFrames(Thread * 0x00135120, ThrowCallbackType * 0x0012f3b0) line 1425
CPFH_RealFirstPassHandler(_EXCEPTION_RECORD * 0x0012f5fc, _EXCEPTION_REGISTRATION_RECORD * 0x0012f9dc, _CONTEXT * 0x0012f618, void * 0x0012f5d4, int 0) line 950
CPFH_FirstPassHandler(_EXCEPTION_RECORD * 0x0012f5fc, _EXCEPTION_REGISTRATION_RECORD * 0x0012f9dc, _CONTEXT * 0x0012f618, void * 0x0012f5d4) line 1092 + 25 bytes
COMPlusFrameHandler(_EXCEPTION_RECORD * 0x0012f5fc, _EXCEPTION_REGISTRATION_RECORD * 0x0012f9dc, _CONTEXT * 0x0012f618, void * 0x0012f5d4) line 1223 + 21 bytes
NTDLL! 77f92538()
NTDLL! 77f8af89()
NTDLL! 77f9fb9a()
*/
| // Licensed to the .NET Foundation under one or more agreements.
// The .NET Foundation licenses this file to you under the MIT license.
//
namespace Test
{
using System;
class AA
{
public int[] m_anField1 = (new int[7]);
public bool[] m_abField2 = (new bool[7]);
public float[] m_afField3 = (new float[7]);
public static void Method1()
{
object local1 = 0.0;
AA[] local2 = new AA[7];
while (true)
{
local2[2].m_anField1 = new AA().m_anField1;
AA a = new AA();
while (a.m_abField2[2]) { }
while ((double)a.m_afField3[2] >= (double)local1) { }
}
}
static int Main()
{
try
{
Console.WriteLine("Testing AA::Method1");
AA.Method1();
}
catch (Exception)
{
Console.WriteLine("Exception handled.");
}
return 100;
}
}
}
/*
---------------------------
Assert Failure (PID 1204, Thread 1056/420)
---------------------------
pPrevNestedInfo
.\excep.cpp, Line: 2322
---------------------------
COMPlusNestedExceptionHandler(_EXCEPTION_RECORD * 0x0012e574, _EXCEPTION_REGISTRATION_RECORD * 0x0012f430, _CONTEXT * 0x0012e2a8, void * 0x0012e61c) line 2322 + 31 bytes
NTDLL! 77f92538()
NTDLL! 77f861f8()
KERNEL32! 77ea13f5()
KERNEL32! 77ea146c()
NTDLL! 77f92538()
NTDLL! 77f8af89()
NTDLL! 77f9fb9a()
EEJitManager::ResumeAtJitEH(CrawlFrame * 0x0012ee84, unsigned long 36, unsigned long 0, Thread * 0x00135120, int 1) line 845
COMPlusUnwindCallback(CrawlFrame * 0x0012ee84, ThrowCallbackType * 0x0012f3b0) line 1918 + 39 bytes
Thread::StackWalkFramesEx(_REGDISPLAY * 0x0012efe4, StackWalkAction (CrawlFrame *, void *)* 0x6d0f6fd0 COMPlusUnwindCallback(CrawlFrame *, ThrowCallbackType *), void * 0x0012f3b0, unsigned int 3, Frame * 0x0012f3fc) line 206 + 16 bytes
Thread::StackWalkFrames(StackWalkAction (CrawlFrame *, void *)* 0x6d0f6fd0 COMPlusUnwindCallback(CrawlFrame *, ThrowCallbackType *), void * 0x0012f3b0, unsigned int 3, Frame * 0x0012f3fc) line 457 + 34 bytes
UnwindFrames(Thread * 0x00135120, ThrowCallbackType * 0x0012f3b0) line 1425
CPFH_RealFirstPassHandler(_EXCEPTION_RECORD * 0x0012f5fc, _EXCEPTION_REGISTRATION_RECORD * 0x0012f9dc, _CONTEXT * 0x0012f618, void * 0x0012f5d4, int 0) line 950
CPFH_FirstPassHandler(_EXCEPTION_RECORD * 0x0012f5fc, _EXCEPTION_REGISTRATION_RECORD * 0x0012f9dc, _CONTEXT * 0x0012f618, void * 0x0012f5d4) line 1092 + 25 bytes
COMPlusFrameHandler(_EXCEPTION_RECORD * 0x0012f5fc, _EXCEPTION_REGISTRATION_RECORD * 0x0012f9dc, _CONTEXT * 0x0012f618, void * 0x0012f5d4) line 1223 + 21 bytes
NTDLL! 77f92538()
NTDLL! 77f8af89()
NTDLL! 77f9fb9a()
*/
| -1 |
dotnet/runtime | 66,248 | Fix issues related to JsonSerializerOptions mutation and caching. | Second attempt at merging https://github.com/dotnet/runtime/pull/65863. Original PR introduced test failures in netfx and was reverted in https://github.com/dotnet/runtime/pull/66235.
cc @jkotas | eiriktsarpalis | 2022-03-05T19:59:48Z | 2022-03-07T19:44:14Z | 44ec3c9474b3a93eb4f71791a43d8bb5c0b08a58 | 8b4eaf94140f488743d0c78caa3afec3b9c5d789 | Fix issues related to JsonSerializerOptions mutation and caching.. Second attempt at merging https://github.com/dotnet/runtime/pull/65863. Original PR introduced test failures in netfx and was reverted in https://github.com/dotnet/runtime/pull/66235.
cc @jkotas | ./src/tests/Interop/PInvoke/Generics/GenericsTest.ReadOnlySpanC.cs | // Licensed to the .NET Foundation under one or more agreements.
// The .NET Foundation licenses this file to you under the MIT license.
using System;
using System.Runtime.InteropServices;
using Xunit;
unsafe partial class GenericsNative
{
[DllImport(nameof(GenericsNative), EntryPoint = "GetSpanC")]
public static extern ReadOnlySpan<char> GetReadOnlySpanC(char e00);
[DllImport(nameof(GenericsNative), EntryPoint = "GetSpanCOut")]
public static extern void GetReadOnlySpanCOut(char e00, out ReadOnlySpan<char> value);
[DllImport(nameof(GenericsNative), EntryPoint = "GetSpanCPtr")]
public static extern ref readonly ReadOnlySpan<char> GetReadOnlySpanCRef(char e00);
[DllImport(nameof(GenericsNative), EntryPoint = "AddSpanC")]
public static extern ReadOnlySpan<char> AddReadOnlySpanC(ReadOnlySpan<char> lhs, ReadOnlySpan<char> rhs);
[DllImport(nameof(GenericsNative), EntryPoint = "AddSpanCs")]
public static extern ReadOnlySpan<char> AddReadOnlySpanCs(in ReadOnlySpan<char> pValues, int count);
}
unsafe partial class GenericsTest
{
private static void TestReadOnlySpanC()
{
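// ReadOnlySpan<char> is a by-ref-like type that the built-in interop marshaler cannot handle,
// so every call below is expected to fail with MarshalDirectiveException.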
Assert.Throws<MarshalDirectiveException>(() => GenericsNative.GetReadOnlySpanC('1'));
Assert.Throws<MarshalDirectiveException>(() => GenericsNative.GetReadOnlySpanCOut('1', out ReadOnlySpan<char> value3));
Assert.Throws<MarshalDirectiveException>(() => GenericsNative.GetReadOnlySpanCRef('1'));
Assert.Throws<MarshalDirectiveException>(() => GenericsNative.AddReadOnlySpanC(default, default));
Assert.Throws<MarshalDirectiveException>(() => {
ReadOnlySpan<char> value = default;
GenericsNative.AddReadOnlySpanCs(in value, 1);
});
}
}
| // Licensed to the .NET Foundation under one or more agreements.
// The .NET Foundation licenses this file to you under the MIT license.
using System;
using System.Runtime.InteropServices;
using Xunit;
unsafe partial class GenericsNative
{
[DllImport(nameof(GenericsNative), EntryPoint = "GetSpanC")]
public static extern ReadOnlySpan<char> GetReadOnlySpanC(char e00);
[DllImport(nameof(GenericsNative), EntryPoint = "GetSpanCOut")]
public static extern void GetReadOnlySpanCOut(char e00, out ReadOnlySpan<char> value);
[DllImport(nameof(GenericsNative), EntryPoint = "GetSpanCPtr")]
public static extern ref readonly ReadOnlySpan<char> GetReadOnlySpanCRef(char e00);
[DllImport(nameof(GenericsNative), EntryPoint = "AddSpanC")]
public static extern ReadOnlySpan<char> AddReadOnlySpanC(ReadOnlySpan<char> lhs, ReadOnlySpan<char> rhs);
[DllImport(nameof(GenericsNative), EntryPoint = "AddSpanCs")]
public static extern ReadOnlySpan<char> AddReadOnlySpanCs(in ReadOnlySpan<char> pValues, int count);
}
unsafe partial class GenericsTest
{
private static void TestReadOnlySpanC()
{
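// ReadOnlySpan<char> is a by-ref-like type that the built-in interop marshaler cannot handle,
// so every call below is expected to fail with MarshalDirectiveException.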
Assert.Throws<MarshalDirectiveException>(() => GenericsNative.GetReadOnlySpanC('1'));
Assert.Throws<MarshalDirectiveException>(() => GenericsNative.GetReadOnlySpanCOut('1', out ReadOnlySpan<char> value3));
Assert.Throws<MarshalDirectiveException>(() => GenericsNative.GetReadOnlySpanCRef('1'));
Assert.Throws<MarshalDirectiveException>(() => GenericsNative.AddReadOnlySpanC(default, default));
Assert.Throws<MarshalDirectiveException>(() => {
ReadOnlySpan<char> value = default;
GenericsNative.AddReadOnlySpanCs(in value, 1);
});
}
}
| -1 |
dotnet/runtime | 66,248 | Fix issues related to JsonSerializerOptions mutation and caching. | Second attempt at merging https://github.com/dotnet/runtime/pull/65863. Original PR introduced test failures in netfx and was reverted in https://github.com/dotnet/runtime/pull/66235.
cc @jkotas | eiriktsarpalis | 2022-03-05T19:59:48Z | 2022-03-07T19:44:14Z | 44ec3c9474b3a93eb4f71791a43d8bb5c0b08a58 | 8b4eaf94140f488743d0c78caa3afec3b9c5d789 | Fix issues related to JsonSerializerOptions mutation and caching.. Second attempt at merging https://github.com/dotnet/runtime/pull/65863. Original PR introduced test failures in netfx and was reverted in https://github.com/dotnet/runtime/pull/66235.
cc @jkotas | ./src/libraries/System.Runtime.Loader/tests/ApplyUpdate/System.Reflection.Metadata.ApplyUpdate.Test.AddLambdaCapturingThis/AddLambdaCapturingThis_v1.cs | // Licensed to the .NET Foundation under one or more agreements.
// The .NET Foundation licenses this file to you under the MIT license.
using System;
namespace System.Reflection.Metadata.ApplyUpdate.Test
{
public class AddLambdaCapturingThis
{
public AddLambdaCapturingThis()
{
field = "abcd";
}
public string GetField => field;
private string field;
public string TestMethod()
{
// capture 'this' but no locals
Func<string,string> fn = s => field;
Func<string,string> fn2 = s => "42" + s + field;
return fn2 ("123");
}
}
}
| // Licensed to the .NET Foundation under one or more agreements.
// The .NET Foundation licenses this file to you under the MIT license.
using System;
namespace System.Reflection.Metadata.ApplyUpdate.Test
{
public class AddLambdaCapturingThis
{
public AddLambdaCapturingThis()
{
field = "abcd";
}
public string GetField => field;
private string field;
public string TestMethod()
{
// capture 'this' but no locals
Func<string,string> fn = s => field;
Func<string,string> fn2 = s => "42" + s + field;
return fn2 ("123");
}
}
}
| -1 |
dotnet/runtime | 66,248 | Fix issues related to JsonSerializerOptions mutation and caching. | Second attempt at merging https://github.com/dotnet/runtime/pull/65863. Original PR introduced test failures in netfx and was reverted in https://github.com/dotnet/runtime/pull/66235.
cc @jkotas | eiriktsarpalis | 2022-03-05T19:59:48Z | 2022-03-07T19:44:14Z | 44ec3c9474b3a93eb4f71791a43d8bb5c0b08a58 | 8b4eaf94140f488743d0c78caa3afec3b9c5d789 | Fix issues related to JsonSerializerOptions mutation and caching.. Second attempt at merging https://github.com/dotnet/runtime/pull/65863. Original PR introduced test failures in netfx and was reverted in https://github.com/dotnet/runtime/pull/66235.
cc @jkotas | ./src/tests/JIT/HardwareIntrinsics/General/Vector64_1/op_Addition.Int64.cs | // Licensed to the .NET Foundation under one or more agreements.
// The .NET Foundation licenses this file to you under the MIT license.
/******************************************************************************
* This file is auto-generated from a template file by the GenerateTests.csx *
* script in tests\src\JIT\HardwareIntrinsics\X86\Shared. In order to make *
* changes, please update the corresponding template and run according to the *
* directions listed in the file. *
******************************************************************************/
using System;
using System.Runtime.CompilerServices;
using System.Runtime.InteropServices;
using System.Runtime.Intrinsics;
namespace JIT.HardwareIntrinsics.General
{
public static partial class Program
{
private static void op_AdditionInt64()
{
var test = new VectorBinaryOpTest__op_AdditionInt64();
// Validates basic functionality works, using Unsafe.Read
test.RunBasicScenario_UnsafeRead();
// Validates calling via reflection works, using Unsafe.Read
test.RunReflectionScenario_UnsafeRead();
// Validates passing a static member works
test.RunClsVarScenario();
// Validates passing a local works, using Unsafe.Read
test.RunLclVarScenario_UnsafeRead();
// Validates passing the field of a local class works
test.RunClassLclFldScenario();
// Validates passing an instance member of a class works
test.RunClassFldScenario();
// Validates passing the field of a local struct works
test.RunStructLclFldScenario();
// Validates passing an instance member of a struct works
test.RunStructFldScenario();
if (!test.Succeeded)
{
throw new Exception("One or more scenarios did not complete as expected.");
}
}
}
public sealed unsafe class VectorBinaryOpTest__op_AdditionInt64
{
private struct DataTable
{
private byte[] inArray1;
private byte[] inArray2;
private byte[] outArray;
private GCHandle inHandle1;
private GCHandle inHandle2;
private GCHandle outHandle;
private ulong alignment;
public DataTable(Int64[] inArray1, Int64[] inArray2, Int64[] outArray, int alignment)
{
int sizeOfinArray1 = inArray1.Length * Unsafe.SizeOf<Int64>();
int sizeOfinArray2 = inArray2.Length * Unsafe.SizeOf<Int64>();
int sizeOfoutArray = outArray.Length * Unsafe.SizeOf<Int64>();
if ((alignment != 32 && alignment != 16 && alignment != 8) || (alignment * 2) < sizeOfinArray1 || (alignment * 2) < sizeOfinArray2 || (alignment * 2) < sizeOfoutArray)
{
throw new ArgumentException("Invalid value of alignment");
}
this.inArray1 = new byte[alignment * 2];
this.inArray2 = new byte[alignment * 2];
this.outArray = new byte[alignment * 2];
this.inHandle1 = GCHandle.Alloc(this.inArray1, GCHandleType.Pinned);
this.inHandle2 = GCHandle.Alloc(this.inArray2, GCHandleType.Pinned);
this.outHandle = GCHandle.Alloc(this.outArray, GCHandleType.Pinned);
this.alignment = (ulong)alignment;
Unsafe.CopyBlockUnaligned(ref Unsafe.AsRef<byte>(inArray1Ptr), ref Unsafe.As<Int64, byte>(ref inArray1[0]), (uint)sizeOfinArray1);
Unsafe.CopyBlockUnaligned(ref Unsafe.AsRef<byte>(inArray2Ptr), ref Unsafe.As<Int64, byte>(ref inArray2[0]), (uint)sizeOfinArray2);
}
public void* inArray1Ptr => Align((byte*)(inHandle1.AddrOfPinnedObject().ToPointer()), alignment);
public void* inArray2Ptr => Align((byte*)(inHandle2.AddrOfPinnedObject().ToPointer()), alignment);
public void* outArrayPtr => Align((byte*)(outHandle.AddrOfPinnedObject().ToPointer()), alignment);
public void Dispose()
{
inHandle1.Free();
inHandle2.Free();
outHandle.Free();
}
private static unsafe void* Align(byte* buffer, ulong expectedAlignment)
{
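// Rounds the pointer up to the next multiple of expectedAlignment (the constructor guarantees a power-of-two alignment of 8, 16 or 32).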
return (void*)(((ulong)buffer + expectedAlignment - 1) & ~(expectedAlignment - 1));
}
}
private struct TestStruct
{
public Vector64<Int64> _fld1;
public Vector64<Int64> _fld2;
public static TestStruct Create()
{
var testStruct = new TestStruct();
for (var i = 0; i < Op1ElementCount; i++) { _data1[i] = TestLibrary.Generator.GetInt64(); }
Unsafe.CopyBlockUnaligned(ref Unsafe.As<Vector64<Int64>, byte>(ref testStruct._fld1), ref Unsafe.As<Int64, byte>(ref _data1[0]), (uint)Unsafe.SizeOf<Vector64<Int64>>());
for (var i = 0; i < Op2ElementCount; i++) { _data2[i] = TestLibrary.Generator.GetInt64(); }
Unsafe.CopyBlockUnaligned(ref Unsafe.As<Vector64<Int64>, byte>(ref testStruct._fld2), ref Unsafe.As<Int64, byte>(ref _data2[0]), (uint)Unsafe.SizeOf<Vector64<Int64>>());
return testStruct;
}
public void RunStructFldScenario(VectorBinaryOpTest__op_AdditionInt64 testClass)
{
var result = _fld1 + _fld2;
Unsafe.Write(testClass._dataTable.outArrayPtr, result);
testClass.ValidateResult(_fld1, _fld2, testClass._dataTable.outArrayPtr);
}
}
private static readonly int LargestVectorSize = 8;
private static readonly int Op1ElementCount = Unsafe.SizeOf<Vector64<Int64>>() / sizeof(Int64);
private static readonly int Op2ElementCount = Unsafe.SizeOf<Vector64<Int64>>() / sizeof(Int64);
private static readonly int RetElementCount = Unsafe.SizeOf<Vector64<Int64>>() / sizeof(Int64);
private static Int64[] _data1 = new Int64[Op1ElementCount];
private static Int64[] _data2 = new Int64[Op2ElementCount];
private static Vector64<Int64> _clsVar1;
private static Vector64<Int64> _clsVar2;
private Vector64<Int64> _fld1;
private Vector64<Int64> _fld2;
private DataTable _dataTable;
static VectorBinaryOpTest__op_AdditionInt64()
{
for (var i = 0; i < Op1ElementCount; i++) { _data1[i] = TestLibrary.Generator.GetInt64(); }
Unsafe.CopyBlockUnaligned(ref Unsafe.As<Vector64<Int64>, byte>(ref _clsVar1), ref Unsafe.As<Int64, byte>(ref _data1[0]), (uint)Unsafe.SizeOf<Vector64<Int64>>());
for (var i = 0; i < Op2ElementCount; i++) { _data2[i] = TestLibrary.Generator.GetInt64(); }
Unsafe.CopyBlockUnaligned(ref Unsafe.As<Vector64<Int64>, byte>(ref _clsVar2), ref Unsafe.As<Int64, byte>(ref _data2[0]), (uint)Unsafe.SizeOf<Vector64<Int64>>());
}
public VectorBinaryOpTest__op_AdditionInt64()
{
Succeeded = true;
for (var i = 0; i < Op1ElementCount; i++) { _data1[i] = TestLibrary.Generator.GetInt64(); }
Unsafe.CopyBlockUnaligned(ref Unsafe.As<Vector64<Int64>, byte>(ref _fld1), ref Unsafe.As<Int64, byte>(ref _data1[0]), (uint)Unsafe.SizeOf<Vector64<Int64>>());
for (var i = 0; i < Op2ElementCount; i++) { _data2[i] = TestLibrary.Generator.GetInt64(); }
Unsafe.CopyBlockUnaligned(ref Unsafe.As<Vector64<Int64>, byte>(ref _fld2), ref Unsafe.As<Int64, byte>(ref _data2[0]), (uint)Unsafe.SizeOf<Vector64<Int64>>());
for (var i = 0; i < Op1ElementCount; i++) { _data1[i] = TestLibrary.Generator.GetInt64(); }
for (var i = 0; i < Op2ElementCount; i++) { _data2[i] = TestLibrary.Generator.GetInt64(); }
_dataTable = new DataTable(_data1, _data2, new Int64[RetElementCount], LargestVectorSize);
}
public bool Succeeded { get; set; }
public void RunBasicScenario_UnsafeRead()
{
TestLibrary.TestFramework.BeginScenario(nameof(RunBasicScenario_UnsafeRead));
var result = Unsafe.Read<Vector64<Int64>>(_dataTable.inArray1Ptr) + Unsafe.Read<Vector64<Int64>>(_dataTable.inArray2Ptr);
Unsafe.Write(_dataTable.outArrayPtr, result);
ValidateResult(_dataTable.inArray1Ptr, _dataTable.inArray2Ptr, _dataTable.outArrayPtr);
}
public void RunReflectionScenario_UnsafeRead()
{
TestLibrary.TestFramework.BeginScenario(nameof(RunReflectionScenario_UnsafeRead));
var result = typeof(Vector64<Int64>).GetMethod("op_Addition", new Type[] { typeof(Vector64<Int64>), typeof(Vector64<Int64>) })
.Invoke(null, new object[] {
Unsafe.Read<Vector64<Int64>>(_dataTable.inArray1Ptr),
Unsafe.Read<Vector64<Int64>>(_dataTable.inArray2Ptr)
});
Unsafe.Write(_dataTable.outArrayPtr, (Vector64<Int64>)(result));
ValidateResult(_dataTable.inArray1Ptr, _dataTable.inArray2Ptr, _dataTable.outArrayPtr);
}
public void RunClsVarScenario()
{
TestLibrary.TestFramework.BeginScenario(nameof(RunClsVarScenario));
var result = _clsVar1 + _clsVar2;
Unsafe.Write(_dataTable.outArrayPtr, result);
ValidateResult(_clsVar1, _clsVar2, _dataTable.outArrayPtr);
}
public void RunLclVarScenario_UnsafeRead()
{
TestLibrary.TestFramework.BeginScenario(nameof(RunLclVarScenario_UnsafeRead));
var op1 = Unsafe.Read<Vector64<Int64>>(_dataTable.inArray1Ptr);
var op2 = Unsafe.Read<Vector64<Int64>>(_dataTable.inArray2Ptr);
var result = op1 + op2;
Unsafe.Write(_dataTable.outArrayPtr, result);
ValidateResult(op1, op2, _dataTable.outArrayPtr);
}
public void RunClassLclFldScenario()
{
TestLibrary.TestFramework.BeginScenario(nameof(RunClassLclFldScenario));
var test = new VectorBinaryOpTest__op_AdditionInt64();
var result = test._fld1 + test._fld2;
Unsafe.Write(_dataTable.outArrayPtr, result);
ValidateResult(test._fld1, test._fld2, _dataTable.outArrayPtr);
}
public void RunClassFldScenario()
{
TestLibrary.TestFramework.BeginScenario(nameof(RunClassFldScenario));
var result = _fld1 + _fld2;
Unsafe.Write(_dataTable.outArrayPtr, result);
ValidateResult(_fld1, _fld2, _dataTable.outArrayPtr);
}
public void RunStructLclFldScenario()
{
TestLibrary.TestFramework.BeginScenario(nameof(RunStructLclFldScenario));
var test = TestStruct.Create();
var result = test._fld1 + test._fld2;
Unsafe.Write(_dataTable.outArrayPtr, result);
ValidateResult(test._fld1, test._fld2, _dataTable.outArrayPtr);
}
public void RunStructFldScenario()
{
TestLibrary.TestFramework.BeginScenario(nameof(RunStructFldScenario));
var test = TestStruct.Create();
test.RunStructFldScenario(this);
}
private void ValidateResult(Vector64<Int64> op1, Vector64<Int64> op2, void* result, [CallerMemberName] string method = "")
{
Int64[] inArray1 = new Int64[Op1ElementCount];
Int64[] inArray2 = new Int64[Op2ElementCount];
Int64[] outArray = new Int64[RetElementCount];
Unsafe.WriteUnaligned(ref Unsafe.As<Int64, byte>(ref inArray1[0]), op1);
Unsafe.WriteUnaligned(ref Unsafe.As<Int64, byte>(ref inArray2[0]), op2);
Unsafe.CopyBlockUnaligned(ref Unsafe.As<Int64, byte>(ref outArray[0]), ref Unsafe.AsRef<byte>(result), (uint)Unsafe.SizeOf<Vector64<Int64>>());
ValidateResult(inArray1, inArray2, outArray, method);
}
private void ValidateResult(void* op1, void* op2, void* result, [CallerMemberName] string method = "")
{
Int64[] inArray1 = new Int64[Op1ElementCount];
Int64[] inArray2 = new Int64[Op2ElementCount];
Int64[] outArray = new Int64[RetElementCount];
Unsafe.CopyBlockUnaligned(ref Unsafe.As<Int64, byte>(ref inArray1[0]), ref Unsafe.AsRef<byte>(op1), (uint)Unsafe.SizeOf<Vector64<Int64>>());
Unsafe.CopyBlockUnaligned(ref Unsafe.As<Int64, byte>(ref inArray2[0]), ref Unsafe.AsRef<byte>(op2), (uint)Unsafe.SizeOf<Vector64<Int64>>());
Unsafe.CopyBlockUnaligned(ref Unsafe.As<Int64, byte>(ref outArray[0]), ref Unsafe.AsRef<byte>(result), (uint)Unsafe.SizeOf<Vector64<Int64>>());
ValidateResult(inArray1, inArray2, outArray, method);
}
private void ValidateResult(Int64[] left, Int64[] right, Int64[] result, [CallerMemberName] string method = "")
{
bool succeeded = true;
if (result[0] != (long)(left[0] + right[0]))
{
succeeded = false;
}
else
{
for (var i = 1; i < RetElementCount; i++)
{
if (result[i] != (long)(left[i] + right[i]))
{
succeeded = false;
break;
}
}
}
if (!succeeded)
{
TestLibrary.TestFramework.LogInformation($"{nameof(Vector64)}.op_Addition<Int64>(Vector64<Int64>, Vector64<Int64>): {method} failed:");
TestLibrary.TestFramework.LogInformation($" left: ({string.Join(", ", left)})");
TestLibrary.TestFramework.LogInformation($" right: ({string.Join(", ", right)})");
TestLibrary.TestFramework.LogInformation($" result: ({string.Join(", ", result)})");
TestLibrary.TestFramework.LogInformation(string.Empty);
Succeeded = false;
}
}
}
}
| // Licensed to the .NET Foundation under one or more agreements.
// The .NET Foundation licenses this file to you under the MIT license.
/******************************************************************************
* This file is auto-generated from a template file by the GenerateTests.csx *
* script in tests\src\JIT\HardwareIntrinsics\X86\Shared. In order to make *
* changes, please update the corresponding template and run according to the *
* directions listed in the file. *
******************************************************************************/
using System;
using System.Runtime.CompilerServices;
using System.Runtime.InteropServices;
using System.Runtime.Intrinsics;
namespace JIT.HardwareIntrinsics.General
{
public static partial class Program
{
private static void op_AdditionInt64()
{
var test = new VectorBinaryOpTest__op_AdditionInt64();
// Validates basic functionality works, using Unsafe.Read
test.RunBasicScenario_UnsafeRead();
// Validates calling via reflection works, using Unsafe.Read
test.RunReflectionScenario_UnsafeRead();
// Validates passing a static member works
test.RunClsVarScenario();
// Validates passing a local works, using Unsafe.Read
test.RunLclVarScenario_UnsafeRead();
// Validates passing the field of a local class works
test.RunClassLclFldScenario();
// Validates passing an instance member of a class works
test.RunClassFldScenario();
// Validates passing the field of a local struct works
test.RunStructLclFldScenario();
// Validates passing an instance member of a struct works
test.RunStructFldScenario();
if (!test.Succeeded)
{
throw new Exception("One or more scenarios did not complete as expected.");
}
}
}
public sealed unsafe class VectorBinaryOpTest__op_AdditionInt64
{
private struct DataTable
{
private byte[] inArray1;
private byte[] inArray2;
private byte[] outArray;
private GCHandle inHandle1;
private GCHandle inHandle2;
private GCHandle outHandle;
private ulong alignment;
public DataTable(Int64[] inArray1, Int64[] inArray2, Int64[] outArray, int alignment)
{
int sizeOfinArray1 = inArray1.Length * Unsafe.SizeOf<Int64>();
int sizeOfinArray2 = inArray2.Length * Unsafe.SizeOf<Int64>();
int sizeOfoutArray = outArray.Length * Unsafe.SizeOf<Int64>();
if ((alignment != 32 && alignment != 16 && alignment != 8) || (alignment * 2) < sizeOfinArray1 || (alignment * 2) < sizeOfinArray2 || (alignment * 2) < sizeOfoutArray)
{
throw new ArgumentException("Invalid value of alignment");
}
this.inArray1 = new byte[alignment * 2];
this.inArray2 = new byte[alignment * 2];
this.outArray = new byte[alignment * 2];
this.inHandle1 = GCHandle.Alloc(this.inArray1, GCHandleType.Pinned);
this.inHandle2 = GCHandle.Alloc(this.inArray2, GCHandleType.Pinned);
this.outHandle = GCHandle.Alloc(this.outArray, GCHandleType.Pinned);
this.alignment = (ulong)alignment;
Unsafe.CopyBlockUnaligned(ref Unsafe.AsRef<byte>(inArray1Ptr), ref Unsafe.As<Int64, byte>(ref inArray1[0]), (uint)sizeOfinArray1);
Unsafe.CopyBlockUnaligned(ref Unsafe.AsRef<byte>(inArray2Ptr), ref Unsafe.As<Int64, byte>(ref inArray2[0]), (uint)sizeOfinArray2);
}
public void* inArray1Ptr => Align((byte*)(inHandle1.AddrOfPinnedObject().ToPointer()), alignment);
public void* inArray2Ptr => Align((byte*)(inHandle2.AddrOfPinnedObject().ToPointer()), alignment);
public void* outArrayPtr => Align((byte*)(outHandle.AddrOfPinnedObject().ToPointer()), alignment);
public void Dispose()
{
inHandle1.Free();
inHandle2.Free();
outHandle.Free();
}
private static unsafe void* Align(byte* buffer, ulong expectedAlignment)
{
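// Rounds the pointer up to the next multiple of expectedAlignment (the constructor guarantees a power-of-two alignment of 8, 16 or 32).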
return (void*)(((ulong)buffer + expectedAlignment - 1) & ~(expectedAlignment - 1));
}
}
private struct TestStruct
{
public Vector64<Int64> _fld1;
public Vector64<Int64> _fld2;
public static TestStruct Create()
{
var testStruct = new TestStruct();
for (var i = 0; i < Op1ElementCount; i++) { _data1[i] = TestLibrary.Generator.GetInt64(); }
Unsafe.CopyBlockUnaligned(ref Unsafe.As<Vector64<Int64>, byte>(ref testStruct._fld1), ref Unsafe.As<Int64, byte>(ref _data1[0]), (uint)Unsafe.SizeOf<Vector64<Int64>>());
for (var i = 0; i < Op2ElementCount; i++) { _data2[i] = TestLibrary.Generator.GetInt64(); }
Unsafe.CopyBlockUnaligned(ref Unsafe.As<Vector64<Int64>, byte>(ref testStruct._fld2), ref Unsafe.As<Int64, byte>(ref _data2[0]), (uint)Unsafe.SizeOf<Vector64<Int64>>());
return testStruct;
}
public void RunStructFldScenario(VectorBinaryOpTest__op_AdditionInt64 testClass)
{
var result = _fld1 + _fld2;
Unsafe.Write(testClass._dataTable.outArrayPtr, result);
testClass.ValidateResult(_fld1, _fld2, testClass._dataTable.outArrayPtr);
}
}
private static readonly int LargestVectorSize = 8;
private static readonly int Op1ElementCount = Unsafe.SizeOf<Vector64<Int64>>() / sizeof(Int64);
private static readonly int Op2ElementCount = Unsafe.SizeOf<Vector64<Int64>>() / sizeof(Int64);
private static readonly int RetElementCount = Unsafe.SizeOf<Vector64<Int64>>() / sizeof(Int64);
private static Int64[] _data1 = new Int64[Op1ElementCount];
private static Int64[] _data2 = new Int64[Op2ElementCount];
private static Vector64<Int64> _clsVar1;
private static Vector64<Int64> _clsVar2;
private Vector64<Int64> _fld1;
private Vector64<Int64> _fld2;
private DataTable _dataTable;
static VectorBinaryOpTest__op_AdditionInt64()
{
for (var i = 0; i < Op1ElementCount; i++) { _data1[i] = TestLibrary.Generator.GetInt64(); }
Unsafe.CopyBlockUnaligned(ref Unsafe.As<Vector64<Int64>, byte>(ref _clsVar1), ref Unsafe.As<Int64, byte>(ref _data1[0]), (uint)Unsafe.SizeOf<Vector64<Int64>>());
for (var i = 0; i < Op2ElementCount; i++) { _data2[i] = TestLibrary.Generator.GetInt64(); }
Unsafe.CopyBlockUnaligned(ref Unsafe.As<Vector64<Int64>, byte>(ref _clsVar2), ref Unsafe.As<Int64, byte>(ref _data2[0]), (uint)Unsafe.SizeOf<Vector64<Int64>>());
}
public VectorBinaryOpTest__op_AdditionInt64()
{
Succeeded = true;
for (var i = 0; i < Op1ElementCount; i++) { _data1[i] = TestLibrary.Generator.GetInt64(); }
Unsafe.CopyBlockUnaligned(ref Unsafe.As<Vector64<Int64>, byte>(ref _fld1), ref Unsafe.As<Int64, byte>(ref _data1[0]), (uint)Unsafe.SizeOf<Vector64<Int64>>());
for (var i = 0; i < Op2ElementCount; i++) { _data2[i] = TestLibrary.Generator.GetInt64(); }
Unsafe.CopyBlockUnaligned(ref Unsafe.As<Vector64<Int64>, byte>(ref _fld2), ref Unsafe.As<Int64, byte>(ref _data2[0]), (uint)Unsafe.SizeOf<Vector64<Int64>>());
for (var i = 0; i < Op1ElementCount; i++) { _data1[i] = TestLibrary.Generator.GetInt64(); }
for (var i = 0; i < Op2ElementCount; i++) { _data2[i] = TestLibrary.Generator.GetInt64(); }
_dataTable = new DataTable(_data1, _data2, new Int64[RetElementCount], LargestVectorSize);
}
public bool Succeeded { get; set; }
public void RunBasicScenario_UnsafeRead()
{
TestLibrary.TestFramework.BeginScenario(nameof(RunBasicScenario_UnsafeRead));
var result = Unsafe.Read<Vector64<Int64>>(_dataTable.inArray1Ptr) + Unsafe.Read<Vector64<Int64>>(_dataTable.inArray2Ptr);
Unsafe.Write(_dataTable.outArrayPtr, result);
ValidateResult(_dataTable.inArray1Ptr, _dataTable.inArray2Ptr, _dataTable.outArrayPtr);
}
public void RunReflectionScenario_UnsafeRead()
{
TestLibrary.TestFramework.BeginScenario(nameof(RunReflectionScenario_UnsafeRead));
var result = typeof(Vector64<Int64>).GetMethod("op_Addition", new Type[] { typeof(Vector64<Int64>), typeof(Vector64<Int64>) })
.Invoke(null, new object[] {
Unsafe.Read<Vector64<Int64>>(_dataTable.inArray1Ptr),
Unsafe.Read<Vector64<Int64>>(_dataTable.inArray2Ptr)
});
Unsafe.Write(_dataTable.outArrayPtr, (Vector64<Int64>)(result));
ValidateResult(_dataTable.inArray1Ptr, _dataTable.inArray2Ptr, _dataTable.outArrayPtr);
}
public void RunClsVarScenario()
{
TestLibrary.TestFramework.BeginScenario(nameof(RunClsVarScenario));
var result = _clsVar1 + _clsVar2;
Unsafe.Write(_dataTable.outArrayPtr, result);
ValidateResult(_clsVar1, _clsVar2, _dataTable.outArrayPtr);
}
public void RunLclVarScenario_UnsafeRead()
{
TestLibrary.TestFramework.BeginScenario(nameof(RunLclVarScenario_UnsafeRead));
var op1 = Unsafe.Read<Vector64<Int64>>(_dataTable.inArray1Ptr);
var op2 = Unsafe.Read<Vector64<Int64>>(_dataTable.inArray2Ptr);
var result = op1 + op2;
Unsafe.Write(_dataTable.outArrayPtr, result);
ValidateResult(op1, op2, _dataTable.outArrayPtr);
}
public void RunClassLclFldScenario()
{
TestLibrary.TestFramework.BeginScenario(nameof(RunClassLclFldScenario));
var test = new VectorBinaryOpTest__op_AdditionInt64();
var result = test._fld1 + test._fld2;
Unsafe.Write(_dataTable.outArrayPtr, result);
ValidateResult(test._fld1, test._fld2, _dataTable.outArrayPtr);
}
public void RunClassFldScenario()
{
TestLibrary.TestFramework.BeginScenario(nameof(RunClassFldScenario));
var result = _fld1 + _fld2;
Unsafe.Write(_dataTable.outArrayPtr, result);
ValidateResult(_fld1, _fld2, _dataTable.outArrayPtr);
}
public void RunStructLclFldScenario()
{
TestLibrary.TestFramework.BeginScenario(nameof(RunStructLclFldScenario));
var test = TestStruct.Create();
var result = test._fld1 + test._fld2;
Unsafe.Write(_dataTable.outArrayPtr, result);
ValidateResult(test._fld1, test._fld2, _dataTable.outArrayPtr);
}
public void RunStructFldScenario()
{
TestLibrary.TestFramework.BeginScenario(nameof(RunStructFldScenario));
var test = TestStruct.Create();
test.RunStructFldScenario(this);
}
private void ValidateResult(Vector64<Int64> op1, Vector64<Int64> op2, void* result, [CallerMemberName] string method = "")
{
Int64[] inArray1 = new Int64[Op1ElementCount];
Int64[] inArray2 = new Int64[Op2ElementCount];
Int64[] outArray = new Int64[RetElementCount];
Unsafe.WriteUnaligned(ref Unsafe.As<Int64, byte>(ref inArray1[0]), op1);
Unsafe.WriteUnaligned(ref Unsafe.As<Int64, byte>(ref inArray2[0]), op2);
Unsafe.CopyBlockUnaligned(ref Unsafe.As<Int64, byte>(ref outArray[0]), ref Unsafe.AsRef<byte>(result), (uint)Unsafe.SizeOf<Vector64<Int64>>());
ValidateResult(inArray1, inArray2, outArray, method);
}
private void ValidateResult(void* op1, void* op2, void* result, [CallerMemberName] string method = "")
{
Int64[] inArray1 = new Int64[Op1ElementCount];
Int64[] inArray2 = new Int64[Op2ElementCount];
Int64[] outArray = new Int64[RetElementCount];
Unsafe.CopyBlockUnaligned(ref Unsafe.As<Int64, byte>(ref inArray1[0]), ref Unsafe.AsRef<byte>(op1), (uint)Unsafe.SizeOf<Vector64<Int64>>());
Unsafe.CopyBlockUnaligned(ref Unsafe.As<Int64, byte>(ref inArray2[0]), ref Unsafe.AsRef<byte>(op2), (uint)Unsafe.SizeOf<Vector64<Int64>>());
Unsafe.CopyBlockUnaligned(ref Unsafe.As<Int64, byte>(ref outArray[0]), ref Unsafe.AsRef<byte>(result), (uint)Unsafe.SizeOf<Vector64<Int64>>());
ValidateResult(inArray1, inArray2, outArray, method);
}
private void ValidateResult(Int64[] left, Int64[] right, Int64[] result, [CallerMemberName] string method = "")
{
bool succeeded = true;
if (result[0] != (long)(left[0] + right[0]))
{
succeeded = false;
}
else
{
for (var i = 1; i < RetElementCount; i++)
{
if (result[i] != (long)(left[i] + right[i]))
{
succeeded = false;
break;
}
}
}
if (!succeeded)
{
TestLibrary.TestFramework.LogInformation($"{nameof(Vector64)}.op_Addition<Int64>(Vector64<Int64>, Vector64<Int64>): {method} failed:");
TestLibrary.TestFramework.LogInformation($" left: ({string.Join(", ", left)})");
TestLibrary.TestFramework.LogInformation($" right: ({string.Join(", ", right)})");
TestLibrary.TestFramework.LogInformation($" result: ({string.Join(", ", result)})");
TestLibrary.TestFramework.LogInformation(string.Empty);
Succeeded = false;
}
}
}
}
| -1 |
dotnet/runtime | 66,248 | Fix issues related to JsonSerializerOptions mutation and caching. | Second attempt at merging https://github.com/dotnet/runtime/pull/65863. Original PR introduced test failures in netfx and was reverted in https://github.com/dotnet/runtime/pull/66235.
cc @jkotas | eiriktsarpalis | 2022-03-05T19:59:48Z | 2022-03-07T19:44:14Z | 44ec3c9474b3a93eb4f71791a43d8bb5c0b08a58 | 8b4eaf94140f488743d0c78caa3afec3b9c5d789 | Fix issues related to JsonSerializerOptions mutation and caching.. Second attempt at merging https://github.com/dotnet/runtime/pull/65863. Original PR introduced test failures in netfx and was reverted in https://github.com/dotnet/runtime/pull/66235.
cc @jkotas | ./src/libraries/Microsoft.VisualBasic.Core/src/Microsoft/VisualBasic/FileIO/TextFieldParser.vb | ' Licensed to the .NET Foundation under one or more agreements.
' The .NET Foundation licenses this file to you under the MIT license.
Option Explicit On
Option Strict On
Imports System
Imports System.ComponentModel
Imports System.Diagnostics
Imports System.Globalization
Imports System.IO
Imports System.Text
Imports System.Text.RegularExpressions
Imports Microsoft.VisualBasic.CompilerServices.ExceptionUtils
Namespace Microsoft.VisualBasic.FileIO
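' Typical usage (illustrative sketch only; assumes a comma-delimited file named "data.csv"):
'   Using parser As New TextFieldParser("data.csv")
'       parser.TextFieldType = FieldType.Delimited
'       parser.SetDelimiters(",")
'       While Not parser.EndOfData
'           Dim fields As String() = parser.ReadFields()
'       End While
'   End Using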
''' <summary>
''' Enables parsing very large delimited or fixed width field files
''' </summary>
''' <remarks></remarks>
Public Class TextFieldParser
Implements IDisposable
''' <summary>
''' Creates a new TextFieldParser to parse the passed in file
''' </summary>
''' <param name="path">The path of the file to be parsed</param>
''' <remarks></remarks>
Public Sub New(ByVal path As String)
' Default to UTF-8 and detect encoding
InitializeFromPath(path, System.Text.Encoding.UTF8, True)
End Sub
''' <summary>
''' Creates a new TextFieldParser to parse the passed in file
''' </summary>
''' <param name="path">The path of the file to be parsed</param>
''' <param name="defaultEncoding">The decoding to default to if encoding isn't determined from file</param>
''' <remarks></remarks>
Public Sub New(ByVal path As String, ByVal defaultEncoding As System.Text.Encoding)
' Default to detect encoding
InitializeFromPath(path, defaultEncoding, True)
End Sub
''' <summary>
''' Creates a new TextFieldParser to parse the passed in file
''' </summary>
''' <param name="path">The path of the file to be parsed</param>
''' <param name="defaultEncoding">The decoding to default to if encoding isn't determined from file</param>
''' <param name="detectEncoding">Indicates whether or not to try to detect the encoding from the BOM</param>
''' <remarks></remarks>
Public Sub New(ByVal path As String, ByVal defaultEncoding As System.Text.Encoding, ByVal detectEncoding As Boolean)
InitializeFromPath(path, defaultEncoding, detectEncoding)
End Sub
''' <summary>
''' Creates a new TextFieldParser to parse a file represented by the passed in stream
''' </summary>
''' <param name="stream"></param>
''' <remarks></remarks>
Public Sub New(ByVal stream As Stream)
' Default to UTF-8 and detect encoding
InitializeFromStream(stream, System.Text.Encoding.UTF8, True)
End Sub
''' <summary>
''' Creates a new TextFieldParser to parse a file represented by the passed in stream
''' </summary>
''' <param name="stream"></param>
''' <param name="defaultEncoding">The decoding to default to if encoding isn't determined from file</param>
''' <remarks></remarks>
Public Sub New(ByVal stream As Stream, ByVal defaultEncoding As System.Text.Encoding)
' Default to detect encoding
InitializeFromStream(stream, defaultEncoding, True)
End Sub
''' <summary>
''' Creates a new TextFieldParser to parse a file represented by the passed in stream
''' </summary>
''' <param name="stream"></param>
''' <param name="defaultEncoding">The decoding to default to if encoding isn't determined from file</param>
''' <param name="detectEncoding">Indicates whether or not to try to detect the encoding from the BOM</param>
''' <remarks></remarks>
Public Sub New(ByVal stream As Stream, ByVal defaultEncoding As System.Text.Encoding, ByVal detectEncoding As Boolean)
InitializeFromStream(stream, defaultEncoding, detectEncoding)
End Sub
''' <summary>
''' Creates a new TextFieldParser to parse a file represented by the passed in stream
''' </summary>
''' <param name="stream"></param>
''' <param name="defaultEncoding">The decoding to default to if encoding isn't determined from file</param>
''' <param name="detectEncoding">Indicates whether or not to try to detect the encoding from the BOM</param>
''' <param name="leaveOpen">Indicates whether or not to leave the passed in stream open</param>
''' <remarks></remarks>
Public Sub New(ByVal stream As Stream, ByVal defaultEncoding As System.Text.Encoding, ByVal detectEncoding As Boolean, ByVal leaveOpen As Boolean)
m_LeaveOpen = leaveOpen
InitializeFromStream(stream, defaultEncoding, detectEncoding)
End Sub
''' <summary>
''' Creates a new TextFieldParser to parse a stream or file represented by the passed in TextReader
''' </summary>
''' <param name="reader">The TextReader that does the reading</param>
''' <remarks></remarks>
Public Sub New(ByVal reader As TextReader)
If reader Is Nothing Then
Throw GetArgumentNullException("reader")
End If
m_Reader = reader
ReadToBuffer()
End Sub
''' <summary>
''' An array of the strings that indicate a line is a comment
''' </summary>
''' <value>An array of comment indicators</value>
''' <remarks>Returns an empty array if not set</remarks>
#Disable Warning CA1819 ' Properties should not return arrays
<EditorBrowsable(EditorBrowsableState.Advanced)>
Public Property CommentTokens() As String()
#Enable Warning CA1819 ' Properties should not return arrays
Get
Return m_CommentTokens
End Get
Set(ByVal value As String())
CheckCommentTokensForWhitespace(value)
m_CommentTokens = value
m_NeedPropertyCheck = True
End Set
End Property
''' <summary>
''' Indicates whether or not there is any data (non-ignorable lines) left to read in the file
''' </summary>
''' <value>True if there's more data to read, otherwise False</value>
''' <remarks>Ignores comments and blank lines</remarks>
Public ReadOnly Property EndOfData() As Boolean
Get
If m_EndOfData Then
Return m_EndOfData
End If
' Make sure we're not at end of file
If m_Reader Is Nothing Or m_Buffer Is Nothing Then
m_EndOfData = True
Return True
End If
'See if we can get a data line
If PeekNextDataLine() IsNot Nothing Then
Return False
End If
m_EndOfData = True
Return True
End Get
End Property
''' <summary>
''' The line to the right of the cursor.
''' </summary>
''' <value>The number of the line</value>
''' <remarks>LineNumber returns the location in the file and has nothing to do with rows or fields</remarks>
<EditorBrowsable(EditorBrowsableState.Advanced)>
Public ReadOnly Property LineNumber() As Long
Get
If m_LineNumber <> -1 Then
' See if we're at the end of file
If m_Reader.Peek = -1 And m_Position = m_CharsRead Then
CloseReader()
End If
End If
Return m_LineNumber
End Get
End Property
''' <summary>
''' Returns the last malformed line if there is one.
''' </summary>
''' <value>The last malformed line</value>
''' <remarks></remarks>
Public ReadOnly Property ErrorLine() As String
Get
Return m_ErrorLine
End Get
End Property
''' <summary>
''' Returns the line number of the last malformed line if there is one.
''' </summary>
''' <value>The last malformed line number</value>
''' <remarks></remarks>
Public ReadOnly Property ErrorLineNumber() As Long
Get
Return m_ErrorLineNumber
End Get
End Property
''' <summary>
''' Indicates the type of file being read, either fixed width or delimited
''' </summary>
''' <value>The type of fields in the file</value>
''' <remarks></remarks>
Public Property TextFieldType() As FieldType
Get
Return m_TextFieldType
End Get
Set(ByVal value As FieldType)
ValidateFieldTypeEnumValue(value, NameOf(value))
m_TextFieldType = value
m_NeedPropertyCheck = True
End Set
End Property
''' <summary>
''' Gets or sets the widths of the fields for reading a fixed width file
''' </summary>
''' <value>An array of the widths</value>
''' <remarks></remarks>
#Disable Warning CA1819 ' Properties should not return arrays
Public Property FieldWidths() As Integer()
#Enable Warning CA1819 ' Properties should not return arrays
Get
Return m_FieldWidths
End Get
Set(ByVal value As Integer())
If value IsNot Nothing Then
ValidateFieldWidthsOnInput(value)
' Keep a copy so we can determine if the user changes elements of the array
m_FieldWidthsCopy = DirectCast(value.Clone(), Integer())
Else
m_FieldWidthsCopy = Nothing
End If
m_FieldWidths = value
m_NeedPropertyCheck = True
End Set
End Property
''' <summary>
''' Gets or sets the delimiters used in a file
''' </summary>
''' <value>An array of the delimiters</value>
''' <remarks></remarks>
#Disable Warning CA1819 ' Properties should not return arrays
Public Property Delimiters() As String()
#Enable Warning CA1819 ' Properties should not return arrays
Get
Return m_Delimiters
End Get
Set(ByVal value As String())
If value IsNot Nothing Then
ValidateDelimiters(value)
' Keep a copy so we can determine if the user changes elements of the array
m_DelimitersCopy = DirectCast(value.Clone(), String())
Else
m_DelimitersCopy = Nothing
End If
m_Delimiters = value
m_NeedPropertyCheck = True
' Force rebuilding of regex
m_BeginQuotesRegex = Nothing
End Set
End Property
''' <summary>
''' Helper function to enable setting delimiters without having to declare an array
''' </summary>
''' <param name="delimiters">A list of the delimiters</param>
''' <remarks></remarks>
Public Sub SetDelimiters(ByVal ParamArray delimiters As String())
Me.Delimiters = delimiters
End Sub
''' <summary>
''' Helper function to enable setting field widths without having to declare an array
''' </summary>
''' <param name="fieldWidths">A list of field widths</param>
''' <remarks></remarks>
Public Sub SetFieldWidths(ByVal ParamArray fieldWidths As Integer())
Me.FieldWidths = fieldWidths
End Sub
''' <summary>
''' Indicates whether or not leading and trailing white space should be removed when returning a field
''' </summary>
''' <value>True if white space should be removed, otherwise False</value>
''' <remarks></remarks>
Public Property TrimWhiteSpace() As Boolean
Get
Return m_TrimWhiteSpace
End Get
Set(ByVal value As Boolean)
m_TrimWhiteSpace = value
End Set
End Property
''' <summary>
''' Reads and returns the next line from the file
''' </summary>
''' <returns>The line read or Nothing if at the end of the file</returns>
''' <remarks>This is a data unaware method. It simply reads the next line in the file.</remarks>
<EditorBrowsable(EditorBrowsableState.Advanced)>
Public Function ReadLine() As String
If m_Reader Is Nothing Or m_Buffer Is Nothing Then
Return Nothing
End If
Dim Line As String
' Set the method to be used when we reach the end of the buffer
Dim BufferFunction As New ChangeBufferFunction(AddressOf ReadToBuffer)
Line = ReadNextLine(m_Position, BufferFunction)
If Line Is Nothing Then
FinishReading()
Return Nothing
Else
m_LineNumber += 1
Return Line.TrimEnd(Chr(13), Chr(10))
End If
End Function
''' <summary>
''' Reads a non-ignorable line and parses it into fields
''' </summary>
''' <returns>The line parsed into fields</returns>
''' <remarks>This is a data aware method. Comments and blank lines are ignored.</remarks>
Public Function ReadFields() As String()
If m_Reader Is Nothing Or m_Buffer Is Nothing Then
Return Nothing
End If
ValidateReadyToRead()
Select Case m_TextFieldType
Case FieldType.FixedWidth
Return ParseFixedWidthLine()
Case FieldType.Delimited
Return ParseDelimitedLine()
Case Else
Debug.Fail("The TextFieldType is not supported")
End Select
Return Nothing
End Function
''' <summary>
''' Enables looking at the passed in number of characters of the next data line without reading the line
''' </summary>
''' <param name="numberOfChars"></param>
''' <returns>A string consisting of the first NumberOfChars characters of the next line</returns>
''' <remarks>If numberOfChars is greater than the next line, only the next line is returned</remarks>
Public Function PeekChars(ByVal numberOfChars As Integer) As String
If numberOfChars <= 0 Then
Throw GetArgumentExceptionWithArgName("numberOfChars", SR.TextFieldParser_NumberOfCharsMustBePositive)
End If
If m_Reader Is Nothing Or m_Buffer Is Nothing Then
Return Nothing
End If
' If we know there's no more data return Nothing
If m_EndOfData Then
Return Nothing
End If
' Get the next line without reading it
Dim Line As String = PeekNextDataLine()
If Line Is Nothing Then
m_EndOfData = True
Return Nothing
End If
' Strip off end-of-line chars
Line = Line.TrimEnd(Chr(13), Chr(10))
' If the number of chars is larger than the line, return the whole line. Otherwise
' return the NumberOfChars characters from the beginning of the line
If Line.Length < numberOfChars Then
Return Line
Else
Dim info As New StringInfo(Line)
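' SubstringByTextElements counts text elements (grapheme clusters), so surrogate pairs and combining sequences are kept whole rather than split.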
Return info.SubstringByTextElements(0, numberOfChars)
End If
End Function
''' <summary>
''' Reads the file starting at the current position and moving to the end of the file
''' </summary>
''' <returns>The contents of the file from the current position to the end of the file</returns>
''' <remarks>This is not a data aware method. Everything in the file from the current position to the end is read</remarks>
<EditorBrowsable(EditorBrowsableState.Advanced)>
Public Function ReadToEnd() As String
If m_Reader Is Nothing Or m_Buffer Is Nothing Then
Return Nothing
End If
Dim Builder As New System.Text.StringBuilder(m_Buffer.Length)
' Get the lines in the Buffer first
Builder.Append(m_Buffer, m_Position, m_CharsRead - m_Position)
' Add what we haven't read
Builder.Append(m_Reader.ReadToEnd())
FinishReading()
Return Builder.ToString()
End Function
''' <summary>
''' Indicates whether or not to handle quotes in a CSV-friendly way
''' </summary>
''' <value>True if we escape quotes otherwise false</value>
''' <remarks></remarks>
<EditorBrowsable(EditorBrowsableState.Advanced)>
Public Property HasFieldsEnclosedInQuotes() As Boolean
Get
Return m_HasFieldsEnclosedInQuotes
End Get
Set(ByVal value As Boolean)
m_HasFieldsEnclosedInQuotes = value
End Set
End Property
''' <summary>
''' Closes the StreamReader
''' </summary>
''' <remarks></remarks>
<EditorBrowsable(EditorBrowsableState.Advanced)>
Public Sub Close()
CloseReader()
End Sub
''' <summary>
''' Closes the StreamReader
''' </summary>
''' <remarks></remarks>
Public Sub Dispose() Implements System.IDisposable.Dispose
Dispose(True)
GC.SuppressFinalize(Me)
End Sub
''' <summary>
''' Standard implementation of IDisposable.Dispose for non sealed classes. Classes derived from
''' TextFieldParser should override this method. After doing their own cleanup, they should call
''' this method (MyBase.Dispose(disposing))
''' </summary>
''' <param name="disposing">Indicates we are called by Dispose and not GC</param>
''' <remarks></remarks>
Protected Overridable Sub Dispose(ByVal disposing As Boolean)
If disposing Then
If Not Me.m_Disposed Then
Close()
End If
Me.m_Disposed = True
End If
End Sub
''' <summary>
''' Validates that the value being passed as an FieldType is a legal value
''' </summary>
''' <param name="value"></param>
''' <remarks></remarks>
Private Sub ValidateFieldTypeEnumValue(ByVal value As FieldType, ByVal paramName As String)
If value < FieldType.Delimited OrElse value > FieldType.FixedWidth Then
Throw New System.ComponentModel.InvalidEnumArgumentException(paramName, DirectCast(value, Integer), GetType(FieldType))
End If
End Sub
''' <summary>
''' Clean up following dispose pattern
''' </summary>
''' <remarks></remarks>
Protected Overrides Sub Finalize()
' Do not change this code. Put cleanup code in Dispose(ByVal disposing As Boolean) above.
Dispose(False)
MyBase.Finalize()
End Sub
''' <summary>
''' Closes the StreamReader
''' </summary>
''' <remarks></remarks>
Private Sub CloseReader()
FinishReading()
If m_Reader IsNot Nothing Then
If Not m_LeaveOpen Then
m_Reader.Close()
End If
m_Reader = Nothing
End If
End Sub
''' <summary>
''' Cleans up managed resources except the StreamReader and indicates reading is finished
''' </summary>
''' <remarks></remarks>
Private Sub FinishReading()
m_LineNumber = -1
m_EndOfData = True
m_Buffer = Nothing
m_DelimiterRegex = Nothing
m_BeginQuotesRegex = Nothing
End Sub
''' <summary>
''' Creates a StreamReader for the passed in Path
''' </summary>
''' <param name="path">The passed in path</param>
''' <param name="defaultEncoding">The encoding to default to if encoding can't be detected</param>
''' <param name="detectEncoding">Indicates whether or not to detect encoding from the BOM</param>
''' <remarks>We validate the arguments here for the three Public constructors that take a Path</remarks>
Private Sub InitializeFromPath(ByVal path As String, ByVal defaultEncoding As System.Text.Encoding, ByVal detectEncoding As Boolean)
If path = "" Then
Throw GetArgumentNullException("path")
End If
If defaultEncoding Is Nothing Then
Throw GetArgumentNullException("defaultEncoding")
End If
Dim fullPath As String = ValidatePath(path)
Dim fileStreamTemp As New FileStream(fullPath, FileMode.Open, FileAccess.Read, FileShare.ReadWrite)
m_Reader = New StreamReader(fileStreamTemp, defaultEncoding, detectEncoding)
ReadToBuffer()
End Sub
''' <summary>
''' Creates a StreamReader for a passed in stream
''' </summary>
''' <param name="stream">The passed in stream</param>
''' <param name="defaultEncoding">The encoding to default to if encoding can't be detected</param>
''' <param name="detectEncoding">Indicates whether or not to detect encoding from the BOM</param>
''' <remarks>We validate the arguments here for the three Public constructors that take a Stream</remarks>
Private Sub InitializeFromStream(ByVal stream As Stream, ByVal defaultEncoding As System.Text.Encoding, ByVal detectEncoding As Boolean)
If stream Is Nothing Then
Throw GetArgumentNullException("stream")
End If
If Not stream.CanRead Then
Throw GetArgumentExceptionWithArgName("stream", SR.TextFieldParser_StreamNotReadable, "stream")
End If
If defaultEncoding Is Nothing Then
Throw GetArgumentNullException("defaultEncoding")
End If
m_Reader = New StreamReader(stream, defaultEncoding, detectEncoding)
ReadToBuffer()
End Sub
''' <summary>
''' Gets full name and path from passed in path.
''' </summary>
''' <param name="path">The path to be validated</param>
''' <returns>The full name and path</returns>
''' <remarks>Throws if the file doesn't exist or if the path is malformed</remarks>
Private Function ValidatePath(ByVal path As String) As String
' Validate and get full path
Dim fullPath As String = FileSystem.NormalizeFilePath(path, "path")
' Make sure the file exists
If Not File.Exists(fullPath) Then
Throw New IO.FileNotFoundException(SR.Format(SR.IO_FileNotFound_Path, fullPath))
End If
Return fullPath
End Function
''' <summary>
''' Indicates whether or not the passed in line should be ignored
''' </summary>
''' <param name="line">The line to be tested</param>
''' <returns>True if the line should be ignored, otherwise False</returns>
''' <remarks>Lines to ignore are blank lines and comments</remarks>
Private Function IgnoreLine(ByVal line As String) As Boolean
' If the Line is Nothing, it has meaning (we've reached the end of the file) so don't
' ignore it
If line Is Nothing Then
Return False
End If
' Ignore empty or whitespace lines
Dim TrimmedLine As String = line.Trim()
If TrimmedLine.Length = 0 Then
Return True
End If
' Ignore comments
If m_CommentTokens IsNot Nothing Then
For Each Token As String In m_CommentTokens
If Token = "" Then
Continue For
End If
If TrimmedLine.StartsWith(Token, StringComparison.Ordinal) Then
Return True
End If
' Test original line in case whitespace char is a comment token
If line.StartsWith(Token, StringComparison.Ordinal) Then
Return True
End If
Next
End If
Return False
End Function
''' <summary>
''' Reads characters from the file into the buffer
''' </summary>
''' <returns>The number of Chars read. If no Chars are read, we're at the end of the file</returns>
''' <remarks></remarks>
Private Function ReadToBuffer() As Integer
Debug.Assert(m_Buffer IsNot Nothing, "There's no buffer")
Debug.Assert(m_Reader IsNot Nothing, "There's no StreamReader")
' Set cursor to beginning of buffer
m_Position = 0
Dim BufferLength As Integer = m_Buffer.Length
Debug.Assert(BufferLength >= DEFAULT_BUFFER_LENGTH, "Buffer shrunk to below default")
' If the buffer has grown, shrink it back to the default size
If BufferLength > DEFAULT_BUFFER_LENGTH Then
BufferLength = DEFAULT_BUFFER_LENGTH
ReDim m_Buffer(BufferLength - 1)
End If
' Read from the stream
m_CharsRead = m_Reader.Read(m_Buffer, 0, BufferLength)
' Return the number of Chars read
Return m_CharsRead
End Function
''' <summary>
''' Moves the cursor and all the data to the right of the cursor to the front of the buffer. It
''' then fills the remainder of the buffer from the file
''' </summary>
''' <returns>The number of Chars read in filling the remainder of the buffer</returns>
''' <remarks>
''' This should be called when we want to make maximum use of the space in the buffer. Characters
''' to the left of the cursor have already been read and can be discarded.
'''</remarks>
Private Function SlideCursorToStartOfBuffer() As Integer
Debug.Assert(m_Buffer IsNot Nothing, "There's no buffer")
Debug.Assert(m_Reader IsNot Nothing, "There's no StreamReader")
Debug.Assert(m_Position >= 0 And m_Position <= m_Buffer.Length, "The cursor is out of range")
' No need to slide if we're already at the beginning
If m_Position > 0 Then
Dim ContentLength As Integer = m_CharsRead - m_Position
Array.Copy(m_Buffer, m_Position, m_Buffer, 0, ContentLength)
' Try to fill the rest of the buffer
Dim CharsRead As Integer = m_Reader.Read(m_Buffer, ContentLength, m_Buffer.Length - ContentLength)
m_CharsRead = ContentLength + CharsRead
m_Position = 0
Return CharsRead
End If
Return 0
End Function
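' Illustrative sketch of the slide (not part of the original source). The positions are examples only.
'
'   Before:  m_Buffer = [ already-read chars | unread chars ]              m_Position = 100, m_CharsRead = 4096
'   After:   m_Buffer = [ unread chars | chars newly read from m_Reader ]  m_Position = 0
'
' Only the unread tail is preserved; the space freed at the end of the buffer is refilled from the stream.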
''' <summary>
''' Increases the size of the buffer. Used when we are at the end of the buffer, need
''' to read more data from the file, and can't discard what we've already read.
''' </summary>
''' <returns>The number of characters read to fill the new buffer</returns>
''' <remarks>This is needed for PeekChars and EndOfData</remarks>
Private Function IncreaseBufferSize() As Integer
Debug.Assert(m_Buffer IsNot Nothing, "There's no buffer")
Debug.Assert(m_Reader IsNot Nothing, "There's no StreamReader")
Debug.Assert(m_Position = 0, "Non-zero position")
' Set cursor
m_PeekPosition = m_CharsRead
If m_CharsRead = m_Buffer.Length Then
' Create a larger buffer and copy our data into it
Dim BufferSize As Integer = m_Buffer.Length + DEFAULT_BUFFER_LENGTH
' Make sure the buffer hasn't grown too large
If BufferSize > m_MaxBufferSize Then
Throw GetInvalidOperationException(SR.TextFieldParser_BufferExceededMaxSize)
End If
Dim TempArray(BufferSize - 1) As Char
Array.Copy(m_Buffer, TempArray, m_Buffer.Length)
m_Buffer = TempArray
End If
Dim CharsRead As Integer = m_Reader.Read(m_Buffer, m_CharsRead, m_Buffer.Length - m_CharsRead)
Debug.Assert(CharsRead <= m_Buffer.Length - m_CharsRead, "We've read more chars than we have space for")
m_CharsRead += CharsRead
Return CharsRead
End Function
''' <summary>
''' Returns the next line of data or nothing if there's no more data to be read
''' </summary>
''' <returns>The next line of data</returns>
''' <remarks>Moves the cursor past the line read</remarks>
Private Function ReadNextDataLine() As String
Dim Line As String
' Set function to use when we reach the end of the buffer
Dim BufferFunction As New ChangeBufferFunction(AddressOf ReadToBuffer)
Do
Line = ReadNextLine(m_Position, BufferFunction)
m_LineNumber += 1
Loop While IgnoreLine(Line)
If Line Is Nothing Then
CloseReader()
End If
Return Line
End Function
''' <summary>
''' Returns the next data line but doesn't move the cursor
''' </summary>
''' <returns>The next data line, or Nothing if there's no more data</returns>
''' <remarks></remarks>
Private Function PeekNextDataLine() As String
Dim Line As String
' Set function to use when we reach the end of the buffer
Dim BufferFunction As New ChangeBufferFunction(AddressOf IncreaseBufferSize)
' Slide the data to the left so that we make maximum use of the buffer
SlideCursorToStartOfBuffer()
m_PeekPosition = 0
Do
Line = ReadNextLine(m_PeekPosition, BufferFunction)
Loop While IgnoreLine(Line)
Return Line
End Function
''' <summary>
''' Function to call when we're at the end of the buffer. We either refill the buffer
''' or change the size of the buffer
''' </summary>
''' <returns></returns>
''' <remarks></remarks>
Private Delegate Function ChangeBufferFunction() As Integer
''' <summary>
''' Gets the next line from the file and moves the passed in cursor past the line
''' </summary>
''' <param name="Cursor">Indicates the current position in the buffer</param>
''' <param name="ChangeBuffer">Function to call when we've reached the end of the buffer</param>
''' <returns>The next line in the file</returns>
''' <remarks>Returns Nothing if we are at the end of the file</remarks>
Private Function ReadNextLine(ByRef Cursor As Integer, ByVal ChangeBuffer As ChangeBufferFunction) As String
Debug.Assert(m_Buffer IsNot Nothing, "There's no buffer")
Debug.Assert(Cursor >= 0 And Cursor <= m_CharsRead, "The cursor is out of range")
' Check to see if the cursor is at the end of the chars in the buffer. If it is, refill the buffer
If Cursor = m_CharsRead Then
If ChangeBuffer() = 0 Then
' We're at the end of the file
Return Nothing
End If
End If
Dim Builder As StringBuilder = Nothing
Do
' Walk through buffer looking for the end of a line. End of line can be vbLf (\n), vbCr (\r) or vbCrLf (\r\n)
For i As Integer = Cursor To m_CharsRead - 1
Dim Character As Char = m_Buffer(i)
If Character = vbCr Or Character = vbLf Then
' We've found the end of a line so add everything we've read so far to the
' builder. We include the end of line char because we need to know what it is
' in case it's embedded in a field.
If Builder IsNot Nothing Then
Builder.Append(m_Buffer, Cursor, i - Cursor + 1)
Else
Builder = New StringBuilder(i + 1)
Builder.Append(m_Buffer, Cursor, i - Cursor + 1)
End If
Cursor = i + 1
#Disable Warning CA1834 ' Consider using 'StringBuilder.Append(char)' when applicable
' See if vbLf should be added as well
If Character = vbCr Then
If Cursor < m_CharsRead Then
If m_Buffer(Cursor) = vbLf Then
Cursor += 1
Builder.Append(vbLf)
End If
ElseIf ChangeBuffer() > 0 Then
If m_Buffer(Cursor) = vbLf Then
Cursor += 1
Builder.Append(vbLf)
End If
End If
End If
#Enable Warning CA1834 ' Consider using 'StringBuilder.Append(char)' when applicable
Return Builder.ToString()
End If
Next i
' We've searched the whole buffer and haven't found an end of line. Save what we have, and read more to the buffer.
Dim Size As Integer = m_CharsRead - Cursor
If Builder Is Nothing Then
Builder = New StringBuilder(Size + DEFAULT_BUILDER_INCREASE)
End If
Builder.Append(m_Buffer, Cursor, Size)
Loop While ChangeBuffer() > 0
Return Builder.ToString()
End Function
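' Illustrative sketch (not part of the original source): for a buffer holding "one" & vbCrLf & "two",
' the first call with Cursor = 0 returns "one" & vbCrLf and leaves Cursor = 5. The line terminator is
' kept so callers can tell which end-of-line sequence (vbCr, vbLf or vbCrLf) was present.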
''' <summary>
''' Gets the next data line and parses it with the delimiters
''' </summary>
''' <returns>An array of the fields in the line</returns>
''' <remarks></remarks>
Private Function ParseDelimitedLine() As String()
Dim Line As String = ReadNextDataLine()
If Line Is Nothing Then
Return Nothing
End If
' The line number is that of the line just read
Dim CurrentLineNumber As Long = m_LineNumber - 1
Dim Index As Integer = 0
Dim Fields As New System.Collections.Generic.List(Of String)
Dim Field As String
Dim LineEndIndex As Integer = GetEndOfLineIndex(Line)
While Index <= LineEndIndex
' Is the field delimited in quotes? We only care about this if
' HasFieldsEnclosedInQuotes is True
Dim MatchResult As Match = Nothing
Dim QuoteDelimited As Boolean = False
If m_HasFieldsEnclosedInQuotes Then
MatchResult = BeginQuotesRegex.Match(Line, Index)
QuoteDelimited = MatchResult.Success
End If
If QuoteDelimited Then
' Move the index beyond the opening quote
Index = MatchResult.Index + MatchResult.Length
' Look for the closing "
Dim EndHelper As New QuoteDelimitedFieldBuilder(m_DelimiterWithEndCharsRegex, m_SpaceChars)
EndHelper.BuildField(Line, Index)
If EndHelper.MalformedLine Then
m_ErrorLine = Line.TrimEnd(Chr(13), Chr(10))
m_ErrorLineNumber = CurrentLineNumber
Throw New MalformedLineException(SR.Format(SR.TextFieldParser_MalFormedDelimitedLine, CurrentLineNumber.ToString(CultureInfo.InvariantCulture)), CurrentLineNumber)
End If
If EndHelper.FieldFinished Then
Field = EndHelper.Field
Index = EndHelper.Index + EndHelper.DelimiterLength
Else
' We may have an embedded line end character, so grab next line
Dim NewLine As String
Dim EndOfLine As Integer
Do
EndOfLine = Line.Length
' Get the next data line
NewLine = ReadNextDataLine()
' If we didn't get a new line, we're at the end of the file so our original line is malformed
If NewLine Is Nothing Then
m_ErrorLine = Line.TrimEnd(Chr(13), Chr(10))
m_ErrorLineNumber = CurrentLineNumber
Throw New MalformedLineException(SR.Format(SR.TextFieldParser_MalFormedDelimitedLine, CurrentLineNumber.ToString(CultureInfo.InvariantCulture)), CurrentLineNumber)
End If
If Line.Length + NewLine.Length > m_MaxLineSize Then
m_ErrorLine = Line.TrimEnd(Chr(13), Chr(10))
m_ErrorLineNumber = CurrentLineNumber
Throw New MalformedLineException(SR.Format(SR.TextFieldParser_MaxLineSizeExceeded, CurrentLineNumber.ToString(CultureInfo.InvariantCulture)), CurrentLineNumber)
End If
Line &= NewLine
LineEndIndex = GetEndOfLineIndex(Line)
EndHelper.BuildField(Line, EndOfLine)
If EndHelper.MalformedLine Then
m_ErrorLine = Line.TrimEnd(Chr(13), Chr(10))
m_ErrorLineNumber = CurrentLineNumber
Throw New MalformedLineException(SR.Format(SR.TextFieldParser_MalFormedDelimitedLine, CurrentLineNumber.ToString(CultureInfo.InvariantCulture)), CurrentLineNumber)
End If
Loop Until EndHelper.FieldFinished
Field = EndHelper.Field
Index = EndHelper.Index + EndHelper.DelimiterLength
End If
If m_TrimWhiteSpace Then
Field = Field.Trim()
End If
Fields.Add(Field)
Else
' Find the next delimiter
Dim DelimiterMatch As Match = m_DelimiterRegex.Match(Line, Index)
If DelimiterMatch.Success Then
Field = Line.Substring(Index, DelimiterMatch.Index - Index)
If m_TrimWhiteSpace Then
Field = Field.Trim()
End If
Fields.Add(Field)
' Move the index
Index = DelimiterMatch.Index + DelimiterMatch.Length
Else
' We're at the end of the line so the field consists of all that's left of the line
' minus the end of line chars
Field = Line.Substring(Index).TrimEnd(Chr(13), Chr(10))
If m_TrimWhiteSpace Then
Field = Field.Trim()
End If
Fields.Add(Field)
Exit While
End If
End If
End While
Return Fields.ToArray()
End Function
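' Illustrative sketch (not part of the original source), assuming Delimiters = {","} and the default
' HasFieldsEnclosedInQuotes/TrimWhiteSpace settings:
'
'   Input line:  "Smith, John",42
'   Result:      {"Smith, John", "42"}   (the quoted field keeps its embedded delimiter)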
''' <summary>
''' Gets the next data line and parses into fixed width fields
''' </summary>
''' <returns>An array of the fields in the line</returns>
''' <remarks></remarks>
Private Function ParseFixedWidthLine() As String()
Debug.Assert(m_FieldWidths IsNot Nothing, "No field widths")
Dim Line As String = ReadNextDataLine()
If Line Is Nothing Then
Return Nothing
End If
' Strip off trailing carriage return or line feed
Line = Line.TrimEnd(Chr(13), Chr(10))
Dim LineInfo As New StringInfo(Line)
ValidateFixedWidthLine(LineInfo, m_LineNumber - 1)
Dim Index As Integer = 0
Dim Bound As Integer = m_FieldWidths.Length - 1
Dim Fields(Bound) As String
For i As Integer = 0 To Bound
Fields(i) = GetFixedWidthField(LineInfo, Index, m_FieldWidths(i))
Index += m_FieldWidths(i)
Next
Return Fields
End Function
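' Illustrative sketch (not part of the original source), assuming FieldWidths = {3, 5, -1}
' (a zero or negative last width means the last field is ragged and takes the rest of the line):
'
'   Input line:  "abcdefghijklmno"
'   Result:      {"abc", "defgh", "ijklmno"}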
''' <summary>
''' Returns the field at the passed in index
''' </summary>
''' <param name="Line">The string containing the fields</param>
''' <param name="Index">The start of the field</param>
''' <param name="FieldLength">The length of the field</param>
''' <returns>The field</returns>
''' <remarks></remarks>
Private Function GetFixedWidthField(ByVal Line As StringInfo, ByVal Index As Integer, ByVal FieldLength As Integer) As String
Dim Field As String
If FieldLength > 0 Then
Field = Line.SubstringByTextElements(Index, FieldLength)
Else
' Make sure the index isn't past the string
If Index >= Line.LengthInTextElements Then
Field = String.Empty
Else
Field = Line.SubstringByTextElements(Index).TrimEnd(Chr(13), Chr(10))
End If
End If
If m_TrimWhiteSpace Then
Return Field.Trim()
Else
Return Field
End If
End Function
''' <summary>
''' Gets the index of the first end of line character
''' </summary>
''' <param name="Line"></param>
''' <returns></returns>
''' <remarks>When there are no end of line characters, the index is the length (one past the end)</remarks>
Private Function GetEndOfLineIndex(ByVal Line As String) As Integer
Debug.Assert(Line IsNot Nothing, "We are parsing a Nothing")
Dim Length As Integer = Line.Length
Debug.Assert(Length > 0, "A blank line shouldn't be parsed")
If Length = 1 Then
Debug.Assert(Line(0) <> vbCr And Line(0) <> vbLf, "A blank line shouldn't be parsed")
Return Length
End If
' Check the next to last and last char for end line characters
If Line(Length - 2) = vbCr Or Line(Length - 2) = vbLf Then
Return Length - 2
ElseIf Line(Length - 1) = vbCr Or Line(Length - 1) = vbLf Then
Return Length - 1
Else
Return Length
End If
End Function
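' Illustrative values (not part of the original source):
'   "abc"            -> 3 (no end of line characters, so the index is the length)
'   "abc" & vbLf     -> 3
'   "abc" & vbCrLf   -> 3 (index of the vbCr)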
''' <summary>
''' Indicates whether or not a line is valid
''' </summary>
''' <param name="Line">The line to be tested</param>
''' <param name="LineNumber">The line number, used for exception</param>
''' <remarks></remarks>
Private Sub ValidateFixedWidthLine(ByVal Line As StringInfo, ByVal LineNumber As Long)
Debug.Assert(Line IsNot Nothing, "No Line sent")
' The only malformed line for fixed length fields is one that's too short
If Line.LengthInTextElements < m_LineLength Then
m_ErrorLine = Line.String
m_ErrorLineNumber = m_LineNumber - 1
Throw New MalformedLineException(SR.Format(SR.TextFieldParser_MalFormedFixedWidthLine, LineNumber.ToString(CultureInfo.InvariantCulture)), LineNumber)
End If
End Sub
''' <summary>
''' Determines whether or not the field widths are valid, and sets the size of a line
''' </summary>
''' <remarks></remarks>
Private Sub ValidateFieldWidths()
If m_FieldWidths Is Nothing Then
Throw GetInvalidOperationException(SR.TextFieldParser_FieldWidthsNothing)
End If
If m_FieldWidths.Length = 0 Then
Throw GetInvalidOperationException(SR.TextFieldParser_FieldWidthsNothing)
End If
Dim WidthBound As Integer = m_FieldWidths.Length - 1
m_LineLength = 0
' add all but the last element
For i As Integer = 0 To WidthBound - 1
Debug.Assert(m_FieldWidths(i) > 0, "Bad field width, this should have been caught on input")
m_LineLength += m_FieldWidths(i)
Next
' Add the last field if it's greater than zero (i.e. not ragged).
If m_FieldWidths(WidthBound) > 0 Then
m_LineLength += m_FieldWidths(WidthBound)
End If
End Sub
''' <summary>
''' Checks the field widths at input.
''' </summary>
''' <param name="Widths"></param>
''' <remarks>
''' All field widths, except the last one, must be greater than zero. If the last width is
''' less than one it indicates the last field is ragged
'''</remarks>
Private Sub ValidateFieldWidthsOnInput(ByVal Widths() As Integer)
Debug.Assert(Widths IsNot Nothing, "There are no field widths")
Dim Bound As Integer = Widths.Length - 1
For i As Integer = 0 To Bound - 1
If Widths(i) < 1 Then
Throw GetArgumentExceptionWithArgName("FieldWidths", SR.TextFieldParser_FieldWidthsMustPositive)
End If
Next
End Sub
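' Illustrative sketch (not part of the original source):
'   SetFieldWidths(5, 10, -1)  -> accepted; the trailing -1 marks a ragged last field
'   SetFieldWidths(5, 0, 3)    -> throws, because a width other than the last is less than one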
''' <summary>
''' Validates the delimiters and creates the Regex objects for finding delimiters or quotes followed
''' by delimiters
''' </summary>
''' <remarks></remarks>
Private Sub ValidateAndEscapeDelimiters()
If m_Delimiters Is Nothing Then
Throw GetArgumentExceptionWithArgName("Delimiters", SR.TextFieldParser_DelimitersNothing)
End If
If m_Delimiters.Length = 0 Then
Throw GetArgumentExceptionWithArgName("Delimiters", SR.TextFieldParser_DelimitersNothing)
End If
Dim Length As Integer = m_Delimiters.Length
Dim Builder As StringBuilder = New StringBuilder()
Dim QuoteBuilder As StringBuilder = New StringBuilder()
' Add ending quote pattern. It will be followed by delimiters resulting in a string like:
' "[ ]*(d1|d2|d3)
QuoteBuilder.Append(EndQuotePattern & "(")
For i As Integer = 0 To Length - 1
If m_Delimiters(i) IsNot Nothing Then
' Make sure delimiter is legal
If m_HasFieldsEnclosedInQuotes Then
If m_Delimiters(i).IndexOf(""""c) > -1 Then
Throw GetInvalidOperationException(SR.TextFieldParser_IllegalDelimiter)
End If
End If
Dim EscapedDelimiter As String = Regex.Escape(m_Delimiters(i))
Builder.Append(EscapedDelimiter & "|")
QuoteBuilder.Append(EscapedDelimiter & "|")
Else
Debug.Fail("Delimiter element is empty. This should have been caught on input")
End If
Next
m_SpaceChars = WhitespaceCharacters
' Get rid of trailing | and set regex
m_DelimiterRegex = New Regex(Builder.ToString(0, Builder.Length - 1), REGEX_OPTIONS)
Builder.Append(vbCr & "|" & vbLf)
m_DelimiterWithEndCharsRegex = New Regex(Builder.ToString(), REGEX_OPTIONS)
' Add end of line (either Cr, Lf, or nothing) to the quote pattern
QuoteBuilder.Append(vbCr & "|" & vbLf & ")|""$")
End Sub
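' Illustrative sketch (not part of the original source): for Delimiters = {",", ";"} the code above
' builds roughly these patterns (escaped delimiters joined with "|"):
'   m_DelimiterRegex             -> ",|;"
'   m_DelimiterWithEndCharsRegex -> ",|;|" & vbCr & "|" & vbLf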
''' <summary>
''' Checks property settings to ensure we're able to read fields.
''' </summary>
''' <remarks>Throws if we're not able to read fields with current property settings</remarks>
Private Sub ValidateReadyToRead()
If m_NeedPropertyCheck Or ArrayHasChanged() Then
Select Case m_TextFieldType
Case FieldType.Delimited
ValidateAndEscapeDelimiters()
Case FieldType.FixedWidth
' Check FieldWidths
ValidateFieldWidths()
Case Else
Debug.Fail("Unknown TextFieldType")
End Select
' Check Comment Tokens
If m_CommentTokens IsNot Nothing Then
For Each Token As String In m_CommentTokens
If Token <> "" Then
If m_HasFieldsEnclosedInQuotes And m_TextFieldType = FieldType.Delimited Then
If String.Equals(Token.Trim(), """", StringComparison.Ordinal) Then
Throw GetInvalidOperationException(SR.TextFieldParser_InvalidComment)
End If
End If
End If
Next
End If
m_NeedPropertyCheck = False
End If
End Sub
''' <summary>
''' Throws if any of the delimiters contain line end characters
''' </summary>
''' <param name="delimiterArray">A string array of delimiters</param>
''' <remarks></remarks>
Private Sub ValidateDelimiters(ByVal delimiterArray() As String)
If delimiterArray Is Nothing Then
Return
End If
For Each delimiter As String In delimiterArray
If delimiter = "" Then
Throw GetArgumentExceptionWithArgName("Delimiters", SR.TextFieldParser_DelimiterNothing, "Delimiters")
End If
If delimiter.IndexOfAny(New Char() {Chr(13), Chr(10)}) > -1 Then
Throw GetArgumentExceptionWithArgName("Delimiters", SR.TextFieldParser_EndCharsInDelimiter)
End If
Next
End Sub
''' <summary>
''' Determines if the FieldWidths or Delimiters arrays have changed.
''' </summary>
''' <remarks>If the array has changed, we need to re initialize before reading.</remarks>
Private Function ArrayHasChanged() As Boolean
Dim lowerBound As Integer = 0
Dim upperBound As Integer = 0
Select Case m_TextFieldType
Case FieldType.Delimited
Debug.Assert((m_DelimitersCopy Is Nothing And m_Delimiters Is Nothing) Or (m_DelimitersCopy IsNot Nothing And m_Delimiters IsNot Nothing), "Delimiters and copy are not both Nothing or both not Nothing")
' Check null cases
If m_Delimiters Is Nothing Then
Return False
End If
lowerBound = m_DelimitersCopy.GetLowerBound(0)
upperBound = m_DelimitersCopy.GetUpperBound(0)
For i As Integer = lowerBound To upperBound
If m_Delimiters(i) <> m_DelimitersCopy(i) Then
Return True
End If
Next i
Case FieldType.FixedWidth
Debug.Assert((m_FieldWidthsCopy Is Nothing And m_FieldWidths Is Nothing) Or (m_FieldWidthsCopy IsNot Nothing And m_FieldWidths IsNot Nothing), "FieldWidths and copy are not both Nothing or both not Nothing")
' Check null cases
If m_FieldWidths Is Nothing Then
Return False
End If
lowerBound = m_FieldWidthsCopy.GetLowerBound(0)
upperBound = m_FieldWidthsCopy.GetUpperBound(0)
For i As Integer = lowerBound To upperBound
If m_FieldWidths(i) <> m_FieldWidthsCopy(i) Then
Return True
End If
Next i
Case Else
Debug.Fail("Unknown TextFieldType")
End Select
Return False
End Function
''' <summary>
''' Throws if any of the comment tokens contain whitespace
''' </summary>
''' <param name="tokens">A string array of comment tokens</param>
''' <remarks></remarks>
Private Sub CheckCommentTokensForWhitespace(ByVal tokens() As String)
If tokens Is Nothing Then
Return
End If
For Each token As String In tokens
If m_WhiteSpaceRegEx.IsMatch(token) Then
Throw GetArgumentExceptionWithArgName("CommentTokens", SR.TextFieldParser_WhitespaceInToken)
End If
Next
End Sub
''' <summary>
''' Gets the appropriate regex for finding a field beginning with quotes
''' </summary>
''' <value>The right regex</value>
''' <remarks></remarks>
Private ReadOnly Property BeginQuotesRegex() As Regex
Get
If m_BeginQuotesRegex Is Nothing Then
' Get the pattern
Dim pattern As String = String.Format(CultureInfo.InvariantCulture, BEGINS_WITH_QUOTE, WhitespacePattern)
m_BeginQuotesRegex = New Regex(pattern, REGEX_OPTIONS)
End If
Return m_BeginQuotesRegex
End Get
End Property
''' <summary>
''' Gets the appropriate expression for finding ending quote of a field
''' </summary>
''' <value>The expression</value>
''' <remarks></remarks>
Private ReadOnly Property EndQuotePattern() As String
Get
Return String.Format(CultureInfo.InvariantCulture, ENDING_QUOTE, WhitespacePattern)
End Get
End Property
''' <summary>
''' Returns a string containing all the characters which are whitespace for parsing purposes
''' </summary>
''' <value></value>
''' <remarks></remarks>
Private ReadOnly Property WhitespaceCharacters() As String
Get
Dim builder As New StringBuilder
For Each code As Integer In m_WhitespaceCodes
Dim spaceChar As Char = ChrW(code)
If Not CharacterIsInDelimiter(spaceChar) Then
builder.Append(spaceChar)
End If
Next
Return builder.ToString()
End Get
End Property
''' <summary>
''' Gets the character set of white-spaces to be used in a regex pattern
''' </summary>
''' <value></value>
''' <remarks></remarks>
Private ReadOnly Property WhitespacePattern() As String
Get
Dim builder As New StringBuilder()
For Each code As Integer In m_WhitespaceCodes
Dim spaceChar As Char = ChrW(code)
If Not CharacterIsInDelimiter(spaceChar) Then
' Gives us something like \u00A0
builder.Append("\u" & code.ToString("X4", CultureInfo.InvariantCulture))
End If
Next
Return builder.ToString()
End Get
End Property
''' <summary>
''' Checks to see if the passed in character is in any of the delimiters
''' </summary>
''' <param name="testCharacter">The character to look for</param>
''' <returns>True if the character is found in a delimiter, otherwise false</returns>
''' <remarks></remarks>
Private Function CharacterIsInDelimiter(ByVal testCharacter As Char) As Boolean
Debug.Assert(m_Delimiters IsNot Nothing, "No delimiters set!")
For Each delimiter As String In m_Delimiters
If delimiter.IndexOf(testCharacter) > -1 Then
Return True
End If
Next
Return False
End Function
' Indicates reader has been disposed
Private m_Disposed As Boolean
' The internal StreamReader that reads the file
Private m_Reader As TextReader
' An array holding the strings that indicate a line is a comment
Private m_CommentTokens() As String = Array.Empty(Of String)()
' The number of the line at the cursor, i.e. the next line to be read by ReadLine or ReadFields
Private m_LineNumber As Long = 1
' Flags whether or not there is data left to read. Assume there is at creation
Private m_EndOfData As Boolean
' Holds the last malformed line
Private m_ErrorLine As String = ""
' Holds the line number of the last malformed line
Private m_ErrorLineNumber As Long = -1
' Indicates what type of fields are in the file (fixed width or delimited)
Private m_TextFieldType As FieldType = FieldType.Delimited
' An array of the widths of the fields in a fixed width file
Private m_FieldWidths() As Integer
' An array of the delimiters used for the fields in the file
Private m_Delimiters() As String
' Holds a copy of the field widths last set so we can respond to changes in the array
Private m_FieldWidthsCopy() As Integer
' Holds a copy of the delimiters last set so we can respond to changes in the array
Private m_DelimitersCopy() As String
' Regular expression used to find delimiters
Private m_DelimiterRegex As Regex
' Regex used with BuildField
Private m_DelimiterWithEndCharsRegex As Regex
' Options used for regular expressions
Private Const REGEX_OPTIONS As RegexOptions = RegexOptions.CultureInvariant
' Codes for whitespace as used by String.Trim excluding line end chars as those are handled separately
Private m_WhitespaceCodes() As Integer = {&H9, &HB, &HC, &H20, &H85, &HA0, &H1680, &H2000, &H2001, &H2002, &H2003, &H2004, &H2005, &H2006, &H2007, &H2008, &H2009, &H200A, &H200B, &H2028, &H2029, &H3000, &HFEFF}
' Regular expression used to find beginning quotes, ignoring spaces and tabs
Private m_BeginQuotesRegex As Regex
' Regular expression for whitespace
Private m_WhiteSpaceRegEx As Regex = New Regex("\s", REGEX_OPTIONS)
' Indicates whether or not white space should be removed from a returned field
Private m_TrimWhiteSpace As Boolean = True
' The position of the cursor in the buffer
Private m_Position As Integer
' The position of the peek cursor
Private m_PeekPosition As Integer
' The number of chars in the buffer
Private m_CharsRead As Integer
' Indicates that the user has changed properties so that we need to validate before a read
Private m_NeedPropertyCheck As Boolean = True
' The default size for the buffer
Private Const DEFAULT_BUFFER_LENGTH As Integer = 4096
' This is a guess as to how much larger the string builder should be beyond the size of what
' we've already read
Private Const DEFAULT_BUILDER_INCREASE As Integer = 10
' Buffer used to hold data read from the file. It holds data that must be read
' ahead of the cursor (for PeekChars and EndOfData)
Private m_Buffer(DEFAULT_BUFFER_LENGTH - 1) As Char
' The minimum length for a valid fixed width line
Private m_LineLength As Integer
' Indicates whether or not we handle quotes in a csv appropriate way
Private m_HasFieldsEnclosedInQuotes As Boolean = True
' A string of the chars that count as spaces (used for csv format). The norm is spaces and tabs.
Private m_SpaceChars As String
' The largest size a line can be.
Private m_MaxLineSize As Integer = 10000000
' The largest size the buffer can be
Private m_MaxBufferSize As Integer = 10000000
' Regex pattern to determine if field begins with quotes
Private Const BEGINS_WITH_QUOTE As String = "\G[{0}]*"""
' Regex pattern to find a quote before a delimiter
Private Const ENDING_QUOTE As String = """[{0}]*"
' Indicates the passed in stream should not be closed
Private m_LeaveOpen As Boolean
End Class
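' Illustrative usage sketch (not part of the original source); "data.csv" is a hypothetical path.
'
'   Using parser As New TextFieldParser("data.csv")
'       parser.TextFieldType = FieldType.Delimited
'       parser.SetDelimiters(",")
'       While Not parser.EndOfData
'           Dim fields As String() = parser.ReadFields()
'           ' ... consume fields ...
'       End While
'   End Using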
''' <summary>
''' Enum used to indicate the kind of file being read, either delimited or fixed length
''' </summary>
''' <remarks></remarks>
Public Enum FieldType As Integer
'!!!!!!!!!! Changes to this enum must be reflected in ValidateFieldTypeEnumValue()
Delimited
FixedWidth
End Enum
''' <summary>
''' Helper class that when passed a line and an index to a quote delimited field
''' will build the field and handle escaped quotes
''' </summary>
''' <remarks></remarks>
Friend NotInheritable Class QuoteDelimitedFieldBuilder
''' <summary>
''' Creates an instance of the class and sets some properties
''' </summary>
''' <param name="DelimiterRegex">The regex used to find any of the delimiters</param>
''' <param name="SpaceChars">Characters treated as space (usually space and tab)</param>
''' <remarks></remarks>
Public Sub New(ByVal DelimiterRegex As Regex, ByVal SpaceChars As String)
m_DelimiterRegex = DelimiterRegex
m_SpaceChars = SpaceChars
End Sub
''' <summary>
''' Indicates whether or not the field has been built.
''' </summary>
''' <value>True if the field has been built, otherwise False</value>
''' <remarks>If the Field has been built, the Field property will return the entire field</remarks>
Public ReadOnly Property FieldFinished() As Boolean
Get
Return m_FieldFinished
End Get
End Property
''' <summary>
''' The field being built
''' </summary>
''' <value>The field</value>
''' <remarks></remarks>
Public ReadOnly Property Field() As String
Get
Return m_Field.ToString()
End Get
End Property
''' <summary>
''' The current index on the line. Used to indicate how much of the line was used to build the field
''' </summary>
''' <value>The current position on the line</value>
''' <remarks></remarks>
Public ReadOnly Property Index() As Integer
Get
Return m_Index
End Get
End Property
''' <summary>
''' The length of the closing delimiter if one was found
''' </summary>
''' <value>The length of the delimiter</value>
''' <remarks></remarks>
Public ReadOnly Property DelimiterLength() As Integer
Get
Return m_DelimiterLength
End Get
End Property
''' <summary>
''' Indicates that the current field breaks the subset of csv rules we enforce
''' </summary>
''' <value>True if the line is malformed, otherwise False</value>
''' <remarks>
''' The rules we enforce are:
''' Embedded quotes must be escaped
''' Only space characters can occur between a delimiter and a quote
'''</remarks>
Public ReadOnly Property MalformedLine() As Boolean
Get
Return m_MalformedLine
End Get
End Property
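' Illustrative sketch of the rules above (not part of the original source), assuming a comma delimiter:
'   "a, ""b"", c",next   -> well formed: embedded quotes are escaped and only spaces may sit
'                           between the closing quote and the delimiter
'   "a "b", c",next      -> malformed: the embedded quote is not escaped
'   "abc"x,next          -> malformed: a non-space character follows the closing quote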
''' <summary>
''' Builds a field by walking through the passed in line starting at StartAt
''' </summary>
''' <param name="Line">The line containing the data</param>
''' <param name="StartAt">The index at which we start building the field</param>
''' <remarks></remarks>
Public Sub BuildField(ByVal Line As String, ByVal StartAt As Integer)
m_Index = StartAt
Dim Length As Integer = Line.Length
While m_Index < Length
If Line(m_Index) = """"c Then
' Are we at the end of the file?
If m_Index + 1 = Length Then
' We've found the end of the field
m_FieldFinished = True
m_DelimiterLength = 1
' Move index past end of line
m_Index += 1
Return
End If
' Check to see if this is an escaped quote
If m_Index + 1 < Line.Length And Line(m_Index + 1) = """"c Then
m_Field.Append(""""c)
m_Index += 2
Continue While
End If
' Find the next delimiter and make sure everything between the quote and
' the delimiter is ignorable
Dim Limit As Integer
Dim DelimiterMatch As Match = m_DelimiterRegex.Match(Line, m_Index + 1)
If Not DelimiterMatch.Success Then
Limit = Length - 1
Else
Limit = DelimiterMatch.Index - 1
End If
For i As Integer = m_Index + 1 To Limit
If m_SpaceChars.IndexOf(Line(i)) < 0 Then
m_MalformedLine = True
Return
End If
Next
' The length of the delimiter is the length of the closing quote (1) + any spaces + the length
' of the delimiter we matched if any
m_DelimiterLength = 1 + Limit - m_Index
If DelimiterMatch.Success Then
m_DelimiterLength += DelimiterMatch.Length
End If
m_FieldFinished = True
Return
Else
m_Field.Append(Line(m_Index))
m_Index += 1
End If
End While
End Sub
' String builder holding the field
Private m_Field As New StringBuilder
' Indicates m_Field contains the entire field
Private m_FieldFinished As Boolean
' The current index on the line
Private m_Index As Integer
' The length of the closing delimiter if one is found
Private m_DelimiterLength As Integer
' The regular expression used to find the next delimiter
Private m_DelimiterRegex As Regex
' Chars that should be counted as space (and hence ignored if occurring before or after a delimiter)
Private m_SpaceChars As String
' Indicates the line breaks the csv rules we enforce
Private m_MalformedLine As Boolean
End Class
End Namespace
| ' Licensed to the .NET Foundation under one or more agreements.
' The .NET Foundation licenses this file to you under the MIT license.
Option Explicit On
Option Strict On
Imports System
Imports System.ComponentModel
Imports System.Diagnostics
Imports System.Globalization
Imports System.IO
Imports System.Text
Imports System.Text.RegularExpressions
Imports Microsoft.VisualBasic.CompilerServices.ExceptionUtils
Namespace Microsoft.VisualBasic.FileIO
''' <summary>
''' Enables parsing very large delimited or fixed width field files
''' </summary>
''' <remarks></remarks>
Public Class TextFieldParser
Implements IDisposable
''' <summary>
''' Creates a new TextFieldParser to parse the passed in file
''' </summary>
''' <param name="path">The path of the file to be parsed</param>
''' <remarks></remarks>
Public Sub New(ByVal path As String)
' Default to UTF-8 and detect encoding
InitializeFromPath(path, System.Text.Encoding.UTF8, True)
End Sub
''' <summary>
''' Creates a new TextFieldParser to parse the passed in file
''' </summary>
''' <param name="path">The path of the file to be parsed</param>
''' <param name="defaultEncoding">The decoding to default to if encoding isn't determined from file</param>
''' <remarks></remarks>
Public Sub New(ByVal path As String, ByVal defaultEncoding As System.Text.Encoding)
' Default to detect encoding
InitializeFromPath(path, defaultEncoding, True)
End Sub
''' <summary>
''' Creates a new TextFieldParser to parse the passed in file
''' </summary>
''' <param name="path">The path of the file to be parsed</param>
''' <param name="defaultEncoding">The decoding to default to if encoding isn't determined from file</param>
''' <param name="detectEncoding">Indicates whether or not to try to detect the encoding from the BOM</param>
''' <remarks></remarks>
Public Sub New(ByVal path As String, ByVal defaultEncoding As System.Text.Encoding, ByVal detectEncoding As Boolean)
InitializeFromPath(path, defaultEncoding, detectEncoding)
End Sub
''' <summary>
''' Creates a new TextFieldParser to parse a file represented by the passed in stream
''' </summary>
''' <param name="stream"></param>
''' <remarks></remarks>
Public Sub New(ByVal stream As Stream)
' Default to UTF-8 and detect encoding
InitializeFromStream(stream, System.Text.Encoding.UTF8, True)
End Sub
''' <summary>
''' Creates a new TextFieldParser to parse a file represented by the passed in stream
''' </summary>
''' <param name="stream"></param>
''' <param name="defaultEncoding">The decoding to default to if encoding isn't determined from file</param>
''' <remarks></remarks>
Public Sub New(ByVal stream As Stream, ByVal defaultEncoding As System.Text.Encoding)
' Default to detect encoding
InitializeFromStream(stream, defaultEncoding, True)
End Sub
''' <summary>
''' Creates a new TextFieldParser to parse a file represented by the passed in stream
''' </summary>
''' <param name="stream"></param>
''' <param name="defaultEncoding">The decoding to default to if encoding isn't determined from file</param>
''' <param name="detectEncoding">Indicates whether or not to try to detect the encoding from the BOM</param>
''' <remarks></remarks>
Public Sub New(ByVal stream As Stream, ByVal defaultEncoding As System.Text.Encoding, ByVal detectEncoding As Boolean)
InitializeFromStream(stream, defaultEncoding, detectEncoding)
End Sub
''' <summary>
''' Creates a new TextFieldParser to parse a file represented by the passed in stream
''' </summary>
''' <param name="stream"></param>
''' <param name="defaultEncoding">The decoding to default to if encoding isn't determined from file</param>
''' <param name="detectEncoding">Indicates whether or not to try to detect the encoding from the BOM</param>
''' <param name="leaveOpen">Indicates whether or not to leave the passed in stream open</param>
''' <remarks></remarks>
Public Sub New(ByVal stream As Stream, ByVal defaultEncoding As System.Text.Encoding, ByVal detectEncoding As Boolean, ByVal leaveOpen As Boolean)
m_LeaveOpen = leaveOpen
InitializeFromStream(stream, defaultEncoding, detectEncoding)
End Sub
''' <summary>
''' Creates a new TextFieldParser to parse a stream or file represented by the passed in TextReader
''' </summary>
''' <param name="reader">The TextReader that does the reading</param>
''' <remarks></remarks>
Public Sub New(ByVal reader As TextReader)
If reader Is Nothing Then
Throw GetArgumentNullException("reader")
End If
m_Reader = reader
ReadToBuffer()
End Sub
''' <summary>
''' An array of the strings that indicate a line is a comment
''' </summary>
''' <value>An array of comment indicators</value>
''' <remarks>Returns an empty array if not set</remarks>
#Disable Warning CA1819 ' Properties should not return arrays
<EditorBrowsable(EditorBrowsableState.Advanced)>
Public Property CommentTokens() As String()
#Enable Warning CA1819 ' Properties should not return arrays
Get
Return m_CommentTokens
End Get
Set(ByVal value As String())
CheckCommentTokensForWhitespace(value)
m_CommentTokens = value
m_NeedPropertyCheck = True
End Set
End Property
''' <summary>
''' Indicates whether or not there is any data (non ignorable lines) left to read in the file
''' </summary>
''' <value>True if there's more data to read, otherwise False</value>
''' <remarks>Ignores comments and blank lines</remarks>
Public ReadOnly Property EndOfData() As Boolean
Get
If m_EndOfData Then
Return m_EndOfData
End If
' Make sure we're not at end of file
If m_Reader Is Nothing Or m_Buffer Is Nothing Then
m_EndOfData = True
Return True
End If
'See if we can get a data line
If PeekNextDataLine() IsNot Nothing Then
Return False
End If
m_EndOfData = True
Return True
End Get
End Property
''' <summary>
''' The line to the right of the cursor.
''' </summary>
''' <value>The number of the line</value>
''' <remarks>LineNumber returns the location in the file and has nothing to do with rows or fields</remarks>
<EditorBrowsable(EditorBrowsableState.Advanced)>
Public ReadOnly Property LineNumber() As Long
Get
If m_LineNumber <> -1 Then
' See if we're at the end of file
If m_Reader.Peek = -1 And m_Position = m_CharsRead Then
CloseReader()
End If
End If
Return m_LineNumber
End Get
End Property
''' <summary>
''' Returns the last malformed line if there is one.
''' </summary>
''' <value>The last malformed line</value>
''' <remarks></remarks>
Public ReadOnly Property ErrorLine() As String
Get
Return m_ErrorLine
End Get
End Property
''' <summary>
''' Returns the line number of last malformed line if there is one.
''' </summary>
''' <value>The last malformed line number</value>
''' <remarks></remarks>
Public ReadOnly Property ErrorLineNumber() As Long
Get
Return m_ErrorLineNumber
End Get
End Property
''' <summary>
''' Indicates the type of file being read, either fixed width or delimited
''' </summary>
''' <value>The type of fields in the file</value>
''' <remarks></remarks>
Public Property TextFieldType() As FieldType
Get
Return m_TextFieldType
End Get
Set(ByVal value As FieldType)
ValidateFieldTypeEnumValue(value, NameOf(value))
m_TextFieldType = value
m_NeedPropertyCheck = True
End Set
End Property
''' <summary>
''' Gets or sets the widths of the fields for reading a fixed width file
''' </summary>
''' <value>An array of the widths</value>
''' <remarks></remarks>
#Disable Warning CA1819 ' Properties should not return arrays
Public Property FieldWidths() As Integer()
#Enable Warning CA1819 ' Properties should not return arrays
Get
Return m_FieldWidths
End Get
Set(ByVal value As Integer())
If value IsNot Nothing Then
ValidateFieldWidthsOnInput(value)
' Keep a copy so we can determine if the user changes elements of the array
m_FieldWidthsCopy = DirectCast(value.Clone(), Integer())
Else
m_FieldWidthsCopy = Nothing
End If
m_FieldWidths = value
m_NeedPropertyCheck = True
End Set
End Property
''' <summary>
''' Gets or sets the delimiters used in a file
''' </summary>
''' <value>An array of the delimiters</value>
''' <remarks></remarks>
#Disable Warning CA1819 ' Properties should not return arrays
Public Property Delimiters() As String()
#Enable Warning CA1819 ' Properties should not return arrays
Get
Return m_Delimiters
End Get
Set(ByVal value As String())
If value IsNot Nothing Then
ValidateDelimiters(value)
' Keep a copy so we can determine if the user changes elements of the array
m_DelimitersCopy = DirectCast(value.Clone(), String())
Else
m_DelimitersCopy = Nothing
End If
m_Delimiters = value
m_NeedPropertyCheck = True
' Force rebuilding of regex
m_BeginQuotesRegex = Nothing
End Set
End Property
''' <summary>
''' Helper function to enable setting delimiters without diming an array
''' </summary>
''' <param name="delimiters">A list of the delimiters</param>
''' <remarks></remarks>
Public Sub SetDelimiters(ByVal ParamArray delimiters As String())
Me.Delimiters = delimiters
End Sub
''' <summary>
''' Helper function to enable setting field widths without diming an array
''' </summary>
''' <param name="fieldWidths">A list of field widths</param>
''' <remarks></remarks>
Public Sub SetFieldWidths(ByVal ParamArray fieldWidths As Integer())
Me.FieldWidths = fieldWidths
End Sub
''' <summary>
''' Indicates whether or not leading and trailing white space should be removed when returning a field
''' </summary>
''' <value>True if white space should be removed, otherwise False</value>
''' <remarks></remarks>
Public Property TrimWhiteSpace() As Boolean
Get
Return m_TrimWhiteSpace
End Get
Set(ByVal value As Boolean)
m_TrimWhiteSpace = value
End Set
End Property
''' <summary>
''' Reads and returns the next line from the file
''' </summary>
''' <returns>The line read or Nothing if at the end of the file</returns>
''' <remarks>This is data unaware method. It simply reads the next line in the file.</remarks>
<EditorBrowsable(EditorBrowsableState.Advanced)>
Public Function ReadLine() As String
If m_Reader Is Nothing Or m_Buffer Is Nothing Then
Return Nothing
End If
Dim Line As String
' Set the method to be used when we reach the end of the buffer
Dim BufferFunction As New ChangeBufferFunction(AddressOf ReadToBuffer)
Line = ReadNextLine(m_Position, BufferFunction)
If Line Is Nothing Then
FinishReading()
Return Nothing
Else
m_LineNumber += 1
Return Line.TrimEnd(Chr(13), Chr(10))
End If
End Function
''' <summary>
''' Reads a non ignorable line and parses it into fields
''' </summary>
''' <returns>The line parsed into fields</returns>
''' <remarks>This is a data aware method. Comments and blank lines are ignored.</remarks>
Public Function ReadFields() As String()
If m_Reader Is Nothing Or m_Buffer Is Nothing Then
Return Nothing
End If
ValidateReadyToRead()
Select Case m_TextFieldType
Case FieldType.FixedWidth
Return ParseFixedWidthLine()
Case FieldType.Delimited
Return ParseDelimitedLine()
Case Else
Debug.Fail("The TextFieldType is not supported")
End Select
Return Nothing
End Function
''' <summary>
''' Enables looking at the passed in number of characters of the next data line without reading the line
''' </summary>
''' <param name="numberOfChars"></param>
''' <returns>A string consisting of the first NumberOfChars characters of the next line</returns>
''' <remarks>If numberOfChars is greater than the next line, only the next line is returned</remarks>
Public Function PeekChars(ByVal numberOfChars As Integer) As String
If numberOfChars <= 0 Then
Throw GetArgumentExceptionWithArgName("numberOfChars", SR.TextFieldParser_NumberOfCharsMustBePositive)
End If
If m_Reader Is Nothing Or m_Buffer Is Nothing Then
Return Nothing
End If
' If we know there's no more data return Nothing
If m_EndOfData Then
Return Nothing
End If
' Get the next line without reading it
Dim Line As String = PeekNextDataLine()
If Line Is Nothing Then
m_EndOfData = True
Return Nothing
End If
' Strip of end of line chars
Line = Line.TrimEnd(Chr(13), Chr(10))
' If the number of chars is larger than the line, return the whole line. Otherwise
' return the NumberOfChars characters from the beginning of the line
If Line.Length < numberOfChars Then
Return Line
Else
Dim info As New StringInfo(Line)
Return info.SubstringByTextElements(0, numberOfChars)
End If
End Function
''' <summary>
''' Reads the file starting at the current position and moving to the end of the file
''' </summary>
''' <returns>The contents of the file from the current position to the end of the file</returns>
''' <remarks>This is not a data aware method. Everything in the file from the current position to the end is read</remarks>
<EditorBrowsable(EditorBrowsableState.Advanced)>
Public Function ReadToEnd() As String
If m_Reader Is Nothing Or m_Buffer Is Nothing Then
Return Nothing
End If
Dim Builder As New System.Text.StringBuilder(m_Buffer.Length)
' Get the lines in the Buffer first
Builder.Append(m_Buffer, m_Position, m_CharsRead - m_Position)
' Add what we haven't read
Builder.Append(m_Reader.ReadToEnd())
FinishReading()
Return Builder.ToString()
End Function
''' <summary>
''' Indicates whether or not to handle quotes in a csv friendly way
''' </summary>
''' <value>True if we escape quotes otherwise false</value>
''' <remarks></remarks>
<EditorBrowsable(EditorBrowsableState.Advanced)>
Public Property HasFieldsEnclosedInQuotes() As Boolean
Get
Return m_HasFieldsEnclosedInQuotes
End Get
Set(ByVal value As Boolean)
m_HasFieldsEnclosedInQuotes = value
End Set
End Property
''' <summary>
''' Closes the StreamReader
''' </summary>
''' <remarks></remarks>
<EditorBrowsable(EditorBrowsableState.Advanced)>
Public Sub Close()
CloseReader()
End Sub
''' <summary>
''' Closes the StreamReader
''' </summary>
''' <remarks></remarks>
Public Sub Dispose() Implements System.IDisposable.Dispose
Dispose(True)
GC.SuppressFinalize(Me)
End Sub
''' <summary>
''' Standard implementation of IDisposable.Dispose for non sealed classes. Classes derived from
''' TextFieldParser should override this method. After doing their own cleanup, they should call
''' this method (MyBase.Dispose(disposing))
''' </summary>
''' <param name="disposing">Indicates we are called by Dispose and not GC</param>
''' <remarks></remarks>
Protected Overridable Sub Dispose(ByVal disposing As Boolean)
If disposing Then
If Not Me.m_Disposed Then
Close()
End If
Me.m_Disposed = True
End If
End Sub
''' <summary>
''' Validates that the value being passed as an FieldType is a legal value
''' </summary>
''' <param name="value"></param>
''' <remarks></remarks>
Private Sub ValidateFieldTypeEnumValue(ByVal value As FieldType, ByVal paramName As String)
If value < FieldType.Delimited OrElse value > FieldType.FixedWidth Then
Throw New System.ComponentModel.InvalidEnumArgumentException(paramName, DirectCast(value, Integer), GetType(FieldType))
End If
End Sub
''' <summary>
''' Clean up following dispose pattern
''' </summary>
''' <remarks></remarks>
Protected Overrides Sub Finalize()
' Do not change this code. Put cleanup code in Dispose(ByVal disposing As Boolean) above.
Dispose(False)
MyBase.Finalize()
End Sub
''' <summary>
''' Closes the StreamReader
''' </summary>
''' <remarks></remarks>
Private Sub CloseReader()
FinishReading()
If m_Reader IsNot Nothing Then
If Not m_LeaveOpen Then
m_Reader.Close()
End If
m_Reader = Nothing
End If
End Sub
''' <summary>
''' Cleans up managed resources except the StreamReader and indicates reading is finished
''' </summary>
''' <remarks></remarks>
Private Sub FinishReading()
m_LineNumber = -1
m_EndOfData = True
m_Buffer = Nothing
m_DelimiterRegex = Nothing
m_BeginQuotesRegex = Nothing
End Sub
''' <summary>
''' Creates a StreamReader for the passed in Path
''' </summary>
''' <param name="path">The passed in path</param>
''' <param name="defaultEncoding">The encoding to default to if encoding can't be detected</param>
''' <param name="detectEncoding">Indicates whether or not to detect encoding from the BOM</param>
''' <remarks>We validate the arguments here for the three Public constructors that take a Path</remarks>
Private Sub InitializeFromPath(ByVal path As String, ByVal defaultEncoding As System.Text.Encoding, ByVal detectEncoding As Boolean)
If path = "" Then
Throw GetArgumentNullException("path")
End If
If defaultEncoding Is Nothing Then
Throw GetArgumentNullException("defaultEncoding")
End If
Dim fullPath As String = ValidatePath(path)
Dim fileStreamTemp As New FileStream(fullPath, FileMode.Open, FileAccess.Read, FileShare.ReadWrite)
m_Reader = New StreamReader(fileStreamTemp, defaultEncoding, detectEncoding)
ReadToBuffer()
End Sub
''' <summary>
''' Creates a StreamReader for a passed in stream
''' </summary>
''' <param name="stream">The passed in stream</param>
''' <param name="defaultEncoding">The encoding to default to if encoding can't be detected</param>
''' <param name="detectEncoding">Indicates whether or not to detect encoding from the BOM</param>
''' <remarks>We validate the arguments here for the three Public constructors that take a Stream</remarks>
Private Sub InitializeFromStream(ByVal stream As Stream, ByVal defaultEncoding As System.Text.Encoding, ByVal detectEncoding As Boolean)
If stream Is Nothing Then
Throw GetArgumentNullException("stream")
End If
If Not stream.CanRead Then
Throw GetArgumentExceptionWithArgName("stream", SR.TextFieldParser_StreamNotReadable, "stream")
End If
If defaultEncoding Is Nothing Then
Throw GetArgumentNullException("defaultEncoding")
End If
m_Reader = New StreamReader(stream, defaultEncoding, detectEncoding)
ReadToBuffer()
End Sub
''' <summary>
''' Gets full name and path from passed in path.
''' </summary>
''' <param name="path">The path to be validated</param>
''' <returns>The full name and path</returns>
''' <remarks>Throws if the file doesn't exist or if the path is malformed</remarks>
Private Function ValidatePath(ByVal path As String) As String
' Validate and get full path
Dim fullPath As String = FileSystem.NormalizeFilePath(path, "path")
' Make sure the file exists
If Not File.Exists(fullPath) Then
Throw New IO.FileNotFoundException(SR.Format(SR.IO_FileNotFound_Path, fullPath))
End If
Return fullPath
End Function
''' <summary>
''' Indicates whether or not the passed in line should be ignored
''' </summary>
''' <param name="line">The line to be tested</param>
''' <returns>True if the line should be ignored, otherwise False</returns>
''' <remarks>Lines to ignore are blank lines and comments</remarks>
Private Function IgnoreLine(ByVal line As String) As Boolean
' If the Line is Nothing, it has meaning (we've reached the end of the file) so don't
' ignore it
If line Is Nothing Then
Return False
End If
' Ignore empty or whitespace lines
Dim TrimmedLine As String = line.Trim()
If TrimmedLine.Length = 0 Then
Return True
End If
' Ignore comments
If m_CommentTokens IsNot Nothing Then
For Each Token As String In m_CommentTokens
If Token = "" Then
Continue For
End If
If TrimmedLine.StartsWith(Token, StringComparison.Ordinal) Then
Return True
End If
' Test original line in case whitespace char is a comment token
If line.StartsWith(Token, StringComparison.Ordinal) Then
Return True
End If
Next
End If
Return False
End Function
''' <summary>
''' Reads characters from the file into the buffer
''' </summary>
''' <returns>The number of Chars read. If no Chars are read, we're at the end of the file</returns>
''' <remarks></remarks>
Private Function ReadToBuffer() As Integer
Debug.Assert(m_Buffer IsNot Nothing, "There's no buffer")
Debug.Assert(m_Reader IsNot Nothing, "There's no StreamReader")
' Set cursor to beginning of buffer
m_Position = 0
Dim BufferLength As Integer = m_Buffer.Length
Debug.Assert(BufferLength >= DEFAULT_BUFFER_LENGTH, "Buffer shrunk to below default")
' If the buffer has grown, shrink it back to the default size
If BufferLength > DEFAULT_BUFFER_LENGTH Then
BufferLength = DEFAULT_BUFFER_LENGTH
ReDim m_Buffer(BufferLength - 1)
End If
' Read from the stream
m_CharsRead = m_Reader.Read(m_Buffer, 0, BufferLength)
' Return the number of Chars read
Return m_CharsRead
End Function
''' <summary>
''' Moves the cursor and all the data to the right of the cursor to the front of the buffer. It
''' then fills the remainder of the buffer from the file
''' </summary>
''' <returns>The number of Chars read in filling the remainder of the buffer</returns>
''' <remarks>
''' This should be called when we want to make maximum use of the space in the buffer. Characters
''' to the left of the cursor have already been read and can be discarded.
'''</remarks>
Private Function SlideCursorToStartOfBuffer() As Integer
Debug.Assert(m_Buffer IsNot Nothing, "There's no buffer")
Debug.Assert(m_Reader IsNot Nothing, "There's no StreamReader")
Debug.Assert(m_Position >= 0 And m_Position <= m_Buffer.Length, "The cursor is out of range")
' No need to slide if we're already at the beginning
If m_Position > 0 Then
Dim ContentLength As Integer = m_CharsRead - m_Position
Array.Copy(m_Buffer, m_Position, m_Buffer, 0, ContentLength)
' Try to fill the rest of the buffer
Dim CharsRead As Integer = m_Reader.Read(m_Buffer, ContentLength, m_Buffer.Length - ContentLength)
m_CharsRead = ContentLength + CharsRead
m_Position = 0
Return CharsRead
End If
Return 0
End Function
''' <summary>
''' Increases the size of the buffer. Used when we are at the end of the buffer, we need
''' to read more data from the file, and we can't discard what we've already read.
''' </summary>
''' <returns>The number of characters read to fill the new buffer</returns>
''' <remarks>This is needed for PeekChars and EndOfData</remarks>
Private Function IncreaseBufferSize() As Integer
Debug.Assert(m_Buffer IsNot Nothing, "There's no buffer")
Debug.Assert(m_Reader IsNot Nothing, "There's no StreamReader")
Debug.Assert(m_Position = 0, "Non-zero position")
' Set cursor
m_PeekPosition = m_CharsRead
If m_CharsRead = m_Buffer.Length Then
' Create a larger buffer and copy our data into it
Dim BufferSize As Integer = m_Buffer.Length + DEFAULT_BUFFER_LENGTH
' Make sure the buffer hasn't grown too large
If BufferSize > m_MaxBufferSize Then
Throw GetInvalidOperationException(SR.TextFieldParser_BufferExceededMaxSize)
End If
Dim TempArray(BufferSize - 1) As Char
Array.Copy(m_Buffer, TempArray, m_Buffer.Length)
m_Buffer = TempArray
End If
Dim CharsRead As Integer = m_Reader.Read(m_Buffer, m_CharsRead, m_Buffer.Length - m_CharsRead)
Debug.Assert(CharsRead <= m_Buffer.Length - m_CharsRead, "We've read more chars than we have space for")
m_CharsRead += CharsRead
Return CharsRead
End Function
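' Illustrative sketch (not part of the original source): with the default values, a
' full 4096-char buffer grows to 8192, then 12288, and so on in 4096-char steps; once
' the requested size would exceed m_MaxBufferSize (10,000,000 chars), the
' TextFieldParser_BufferExceededMaxSize error is thrown instead.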
''' <summary>
''' Returns the next line of data or nothing if there's no more data to be read
''' </summary>
''' <returns>The next line of data</returns>
''' <remarks>Moves the cursor past the line read</remarks>
Private Function ReadNextDataLine() As String
Dim Line As String
' Set function to use when we reach the end of the buffer
Dim BufferFunction As New ChangeBufferFunction(AddressOf ReadToBuffer)
Do
Line = ReadNextLine(m_Position, BufferFunction)
m_LineNumber += 1
Loop While IgnoreLine(Line)
If Line Is Nothing Then
CloseReader()
End If
Return Line
End Function
''' <summary>
''' Returns the next data line but doesn't move the cursor
''' </summary>
''' <returns>The next data line, or Nothing if there's no more data</returns>
''' <remarks></remarks>
Private Function PeekNextDataLine() As String
Dim Line As String
' Set function to use when we reach the end of the buffer
Dim BufferFunction As New ChangeBufferFunction(AddressOf IncreaseBufferSize)
' Slide the data to the left so that we make maximum use of the buffer
SlideCursorToStartOfBuffer()
m_PeekPosition = 0
Do
Line = ReadNextLine(m_PeekPosition, BufferFunction)
Loop While IgnoreLine(Line)
Return Line
End Function
''' <summary>
''' Function to call when we're at the end of the buffer. We either refill the buffer
''' or change the size of the buffer
''' </summary>
''' <returns></returns>
''' <remarks></remarks>
Private Delegate Function ChangeBufferFunction() As Integer
''' <summary>
''' Gets the next line from the file and moves the passed in cursor past the line
''' </summary>
''' <param name="Cursor">Indicates the current position in the buffer</param>
''' <param name="ChangeBuffer">Function to call when we've reached the end of the buffer</param>
''' <returns>The next line in the file</returns>
''' <remarks>Returns Nothing if we are at the end of the file</remarks>
Private Function ReadNextLine(ByRef Cursor As Integer, ByVal ChangeBuffer As ChangeBufferFunction) As String
Debug.Assert(m_Buffer IsNot Nothing, "There's no buffer")
Debug.Assert(Cursor >= 0 And Cursor <= m_CharsRead, "The cursor is out of range")
' Check to see if the cursor is at the end of the chars in the buffer. If it is, refill the buffer
If Cursor = m_CharsRead Then
If ChangeBuffer() = 0 Then
' We're at the end of the file
Return Nothing
End If
End If
Dim Builder As StringBuilder = Nothing
Do
' Walk through buffer looking for the end of a line. End of line can be vbLf (\n), vbCr (\r) or vbCrLf (\r\n)
For i As Integer = Cursor To m_CharsRead - 1
Dim Character As Char = m_Buffer(i)
If Character = vbCr Or Character = vbLf Then
' We've found the end of a line so add everything we've read so far to the
' builder. We include the end of line char because we need to know what it is
' in case it's embedded in a field.
If Builder IsNot Nothing Then
Builder.Append(m_Buffer, Cursor, i - Cursor + 1)
Else
Builder = New StringBuilder(i + 1)
Builder.Append(m_Buffer, Cursor, i - Cursor + 1)
End If
Cursor = i + 1
#Disable Warning CA1834 ' Consider using 'StringBuilder.Append(char)' when applicable
' See if vbLf should be added as well
If Character = vbCr Then
If Cursor < m_CharsRead Then
If m_Buffer(Cursor) = vbLf Then
Cursor += 1
Builder.Append(vbLf)
End If
ElseIf ChangeBuffer() > 0 Then
If m_Buffer(Cursor) = vbLf Then
Cursor += 1
Builder.Append(vbLf)
End If
End If
End If
#Enable Warning CA1834 ' Consider using 'StringBuilder.Append(char)' when applicable
Return Builder.ToString()
End If
Next i
' We've searched the whole buffer and haven't found an end of line. Save what we have, and read more to the buffer.
Dim Size As Integer = m_CharsRead - Cursor
If Builder Is Nothing Then
Builder = New StringBuilder(Size + DEFAULT_BUILDER_INCREASE)
End If
Builder.Append(m_Buffer, Cursor, Size)
Loop While ChangeBuffer() > 0
Return Builder.ToString()
End Function
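' Illustrative sketch (not part of the original source): for buffered text
' "one" & vbCrLf & "two", the first call returns "one" & vbCrLf (the end-of-line chars
' are kept because they may matter inside quoted fields) and leaves the cursor
' positioned at the 't' of "two".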
''' <summary>
''' Gets the next data line and parses it with the delimiters
''' </summary>
''' <returns>An array of the fields in the line</returns>
''' <remarks></remarks>
Private Function ParseDelimitedLine() As String()
Dim Line As String = ReadNextDataLine()
If Line Is Nothing Then
Return Nothing
End If
' The line number is that of the line just read
Dim CurrentLineNumber As Long = m_LineNumber - 1
Dim Index As Integer = 0
Dim Fields As New System.Collections.Generic.List(Of String)
Dim Field As String
Dim LineEndIndex As Integer = GetEndOfLineIndex(Line)
While Index <= LineEndIndex
' Is the field delimited in quotes? We only care about this if
' HasFieldsEnclosedInQuotes is True
Dim MatchResult As Match = Nothing
Dim QuoteDelimited As Boolean = False
If m_HasFieldsEnclosedInQuotes Then
MatchResult = BeginQuotesRegex.Match(Line, Index)
QuoteDelimited = MatchResult.Success
End If
If QuoteDelimited Then
' Move the Index beyond the quote
Index = MatchResult.Index + MatchResult.Length
' Look for the closing "
Dim EndHelper As New QuoteDelimitedFieldBuilder(m_DelimiterWithEndCharsRegex, m_SpaceChars)
EndHelper.BuildField(Line, Index)
If EndHelper.MalformedLine Then
m_ErrorLine = Line.TrimEnd(Chr(13), Chr(10))
m_ErrorLineNumber = CurrentLineNumber
Throw New MalformedLineException(SR.Format(SR.TextFieldParser_MalFormedDelimitedLine, CurrentLineNumber.ToString(CultureInfo.InvariantCulture)), CurrentLineNumber)
End If
If EndHelper.FieldFinished Then
Field = EndHelper.Field
Index = EndHelper.Index + EndHelper.DelimiterLength
Else
' We may have an embedded line end character, so grab next line
Dim NewLine As String
Dim EndOfLine As Integer
Do
EndOfLine = Line.Length
' Get the next data line
NewLine = ReadNextDataLine()
' If we didn't get a new line, we're at the end of the file so our original line is malformed
If NewLine Is Nothing Then
m_ErrorLine = Line.TrimEnd(Chr(13), Chr(10))
m_ErrorLineNumber = CurrentLineNumber
Throw New MalformedLineException(SR.Format(SR.TextFieldParser_MalFormedDelimitedLine, CurrentLineNumber.ToString(CultureInfo.InvariantCulture)), CurrentLineNumber)
End If
If Line.Length + NewLine.Length > m_MaxLineSize Then
m_ErrorLine = Line.TrimEnd(Chr(13), Chr(10))
m_ErrorLineNumber = CurrentLineNumber
Throw New MalformedLineException(SR.Format(SR.TextFieldParser_MaxLineSizeExceeded, CurrentLineNumber.ToString(CultureInfo.InvariantCulture)), CurrentLineNumber)
End If
Line &= NewLine
LineEndIndex = GetEndOfLineIndex(Line)
EndHelper.BuildField(Line, EndOfLine)
If EndHelper.MalformedLine Then
m_ErrorLine = Line.TrimEnd(Chr(13), Chr(10))
m_ErrorLineNumber = CurrentLineNumber
Throw New MalformedLineException(SR.Format(SR.TextFieldParser_MalFormedDelimitedLine, CurrentLineNumber.ToString(CultureInfo.InvariantCulture)), CurrentLineNumber)
End If
Loop Until EndHelper.FieldFinished
Field = EndHelper.Field
Index = EndHelper.Index + EndHelper.DelimiterLength
End If
If m_TrimWhiteSpace Then
Field = Field.Trim()
End If
Fields.Add(Field)
Else
' Find the next delimiter
Dim DelimiterMatch As Match = m_DelimiterRegex.Match(Line, Index)
If DelimiterMatch.Success Then
Field = Line.Substring(Index, DelimiterMatch.Index - Index)
If m_TrimWhiteSpace Then
Field = Field.Trim()
End If
Fields.Add(Field)
' Move the index
Index = DelimiterMatch.Index + DelimiterMatch.Length
Else
' We're at the end of the line so the field consists of all that's left of the line
' minus the end of line chars
Field = Line.Substring(Index).TrimEnd(Chr(13), Chr(10))
If m_TrimWhiteSpace Then
Field = Field.Trim()
End If
Fields.Add(Field)
Exit While
End If
End If
End While
Return Fields.ToArray()
End Function
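' Illustrative sketch (not part of the original source), assuming "," is the only
' delimiter and HasFieldsEnclosedInQuotes is True: the line
'     abc,"de, f",ghi
' parses into the three fields "abc", "de, f" and "ghi"; the comma inside the quoted
' field is not treated as a delimiter and the surrounding quotes are stripped from
' the returned field.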
''' <summary>
''' Gets the next data line and parses into fixed width fields
''' </summary>
''' <returns>An array of the fields in the line</returns>
''' <remarks></remarks>
Private Function ParseFixedWidthLine() As String()
Debug.Assert(m_FieldWidths IsNot Nothing, "No field widths")
Dim Line As String = ReadNextDataLine()
If Line Is Nothing Then
Return Nothing
End If
' Strip off trailing carriage return or line feed
Line = Line.TrimEnd(Chr(13), Chr(10))
Dim LineInfo As New StringInfo(Line)
ValidateFixedWidthLine(LineInfo, m_LineNumber - 1)
Dim Index As Integer = 0
Dim Bound As Integer = m_FieldWidths.Length - 1
Dim Fields(Bound) As String
For i As Integer = 0 To Bound
Fields(i) = GetFixedWidthField(LineInfo, Index, m_FieldWidths(i))
Index += m_FieldWidths(i)
Next
Return Fields
End Function
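' Illustrative sketch (not part of the original source), assuming field widths of
' {3, 5, -1}: the line "abcDEFGHijklm" yields the fields "abc", "DEFGH" and "ijklm";
' the trailing -1 marks a ragged last field that simply takes the rest of the line.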
''' <summary>
''' Returns the field at the passed in index
''' </summary>
''' <param name="Line">The string containing the fields</param>
''' <param name="Index">The start of the field</param>
''' <param name="FieldLength">The length of the field</param>
''' <returns>The field</returns>
''' <remarks></remarks>
Private Function GetFixedWidthField(ByVal Line As StringInfo, ByVal Index As Integer, ByVal FieldLength As Integer) As String
Dim Field As String
If FieldLength > 0 Then
Field = Line.SubstringByTextElements(Index, FieldLength)
Else
' Make sure the index isn't past the string
If Index >= Line.LengthInTextElements Then
Field = String.Empty
Else
Field = Line.SubstringByTextElements(Index).TrimEnd(Chr(13), Chr(10))
End If
End If
If m_TrimWhiteSpace Then
Return Field.Trim()
Else
Return Field
End If
End Function
''' <summary>
''' Gets the index of the first end of line character
''' </summary>
''' <param name="Line"></param>
''' <returns></returns>
''' <remarks>When there are no end of line characters, the index is the length (one past the end)</remarks>
Private Function GetEndOfLineIndex(ByVal Line As String) As Integer
Debug.Assert(Line IsNot Nothing, "We are parsing a Nothing")
Dim Length As Integer = Line.Length
Debug.Assert(Length > 0, "A blank line shouldn't be parsed")
If Length = 1 Then
Debug.Assert(Line(0) <> vbCr And Line(0) <> vbLf, "A blank line shouldn't be parsed")
Return Length
End If
' Check the next to last and last char for end line characters
If Line(Length - 2) = vbCr Or Line(Length - 2) = vbLf Then
Return Length - 2
ElseIf Line(Length - 1) = vbCr Or Line(Length - 1) = vbLf Then
Return Length - 1
Else
Return Length
End If
End Function
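' Illustrative sketch (not part of the original source): for "abc" & vbCrLf the
' function returns 3 (the index of the Cr), for "abc" & vbLf it also returns 3, and
' for a line with no end-of-line chars it returns the full length of the string.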
''' <summary>
''' Indicates whether or not a line is valid
''' </summary>
''' <param name="Line">The line to be tested</param>
''' <param name="LineNumber">The line number, used for exception</param>
''' <remarks></remarks>
Private Sub ValidateFixedWidthLine(ByVal Line As StringInfo, ByVal LineNumber As Long)
Debug.Assert(Line IsNot Nothing, "No Line sent")
' The only malformed line for fixed length fields is one that's too short
If Line.LengthInTextElements < m_LineLength Then
m_ErrorLine = Line.String
m_ErrorLineNumber = m_LineNumber - 1
Throw New MalformedLineException(SR.Format(SR.TextFieldParser_MalFormedFixedWidthLine, LineNumber.ToString(CultureInfo.InvariantCulture)), LineNumber)
End If
End Sub
''' <summary>
''' Determines whether or not the field widths are valid, and sets the size of a line
''' </summary>
''' <remarks></remarks>
Private Sub ValidateFieldWidths()
If m_FieldWidths Is Nothing Then
Throw GetInvalidOperationException(SR.TextFieldParser_FieldWidthsNothing)
End If
If m_FieldWidths.Length = 0 Then
Throw GetInvalidOperationException(SR.TextFieldParser_FieldWidthsNothing)
End If
Dim WidthBound As Integer = m_FieldWidths.Length - 1
m_LineLength = 0
' add all but the last element
For i As Integer = 0 To WidthBound - 1
Debug.Assert(m_FieldWidths(i) > 0, "Bad field width, this should have been caught on input")
m_LineLength += m_FieldWidths(i)
Next
' Add the last field if it's greater than zero (i.e. not ragged).
If m_FieldWidths(WidthBound) > 0 Then
m_LineLength += m_FieldWidths(WidthBound)
End If
End Sub
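' Illustrative sketch (not part of the original source): for field widths {5, 10, -1}
' the computed m_LineLength is 15, because the negative last width marks a ragged
' field and contributes nothing to the minimum valid line length.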
''' <summary>
''' Checks the field widths at input.
''' </summary>
''' <param name="Widths"></param>
''' <remarks>
''' All field widths, except the last one, must be greater than zero. If the last width is
''' less than one it indicates the last field is ragged
''' </remarks>
Private Sub ValidateFieldWidthsOnInput(ByVal Widths() As Integer)
Debug.Assert(Widths IsNot Nothing, "There are no field widths")
Dim Bound As Integer = Widths.Length - 1
For i As Integer = 0 To Bound - 1
If Widths(i) < 1 Then
Throw GetArgumentExceptionWithArgName("FieldWidths", SR.TextFieldParser_FieldWidthsMustPositive)
End If
Next
End Sub
''' <summary>
''' Validates the delimiters and creates the Regex objects for finding delimiters or quotes followed
''' by delimiters
''' </summary>
''' <remarks></remarks>
Private Sub ValidateAndEscapeDelimiters()
If m_Delimiters Is Nothing Then
Throw GetArgumentExceptionWithArgName("Delimiters", SR.TextFieldParser_DelimitersNothing)
End If
If m_Delimiters.Length = 0 Then
Throw GetArgumentExceptionWithArgName("Delimiters", SR.TextFieldParser_DelimitersNothing)
End If
Dim Length As Integer = m_Delimiters.Length
Dim Builder As StringBuilder = New StringBuilder()
Dim QuoteBuilder As StringBuilder = New StringBuilder()
' Add ending quote pattern. It will be followed by delimiters resulting in a string like:
' "[ ]*(d1|d2|d3)
QuoteBuilder.Append(EndQuotePattern & "(")
For i As Integer = 0 To Length - 1
If m_Delimiters(i) IsNot Nothing Then
' Make sure delimiter is legal
If m_HasFieldsEnclosedInQuotes Then
If m_Delimiters(i).IndexOf(""""c) > -1 Then
Throw GetInvalidOperationException(SR.TextFieldParser_IllegalDelimiter)
End If
End If
Dim EscapedDelimiter As String = Regex.Escape(m_Delimiters(i))
Builder.Append(EscapedDelimiter & "|")
QuoteBuilder.Append(EscapedDelimiter & "|")
Else
Debug.Fail("Delimiter element is empty. This should have been caught on input")
End If
Next
m_SpaceChars = WhitespaceCharacters
' Get rid of trailing | and set regex
m_DelimiterRegex = New Regex(Builder.ToString(0, Builder.Length - 1), REGEX_OPTIONS)
Builder.Append(vbCr & "|" & vbLf)
m_DelimiterWithEndCharsRegex = New Regex(Builder.ToString(), REGEX_OPTIONS)
' Add end of line (either Cr, Lf, or nothing) and set regex
QuoteBuilder.Append(vbCr & "|" & vbLf & ")|""$")
End Sub
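' Illustrative sketch (not part of the original source), assuming the delimiters
' {",", ";"}: m_DelimiterRegex is built from the pattern ",|;" and
' m_DelimiterWithEndCharsRegex from ",|;|" & vbCr & "|" & vbLf, so the latter also
' matches a bare end-of-line character.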
''' <summary>
''' Checks property settings to ensure we're able to read fields.
''' </summary>
''' <remarks>Throws if we're not able to read fields with current property settings</remarks>
Private Sub ValidateReadyToRead()
If m_NeedPropertyCheck Or ArrayHasChanged() Then
Select Case m_TextFieldType
Case FieldType.Delimited
ValidateAndEscapeDelimiters()
Case FieldType.FixedWidth
' Check FieldWidths
ValidateFieldWidths()
Case Else
Debug.Fail("Unknown TextFieldType")
End Select
' Check Comment Tokens
If m_CommentTokens IsNot Nothing Then
For Each Token As String In m_CommentTokens
If Token <> "" Then
If m_HasFieldsEnclosedInQuotes And m_TextFieldType = FieldType.Delimited Then
If String.Equals(Token.Trim(), """", StringComparison.Ordinal) Then
Throw GetInvalidOperationException(SR.TextFieldParser_InvalidComment)
End If
End If
End If
Next
End If
m_NeedPropertyCheck = False
End If
End Sub
''' <summary>
''' Throws if any of the delimiters contain line end characters
''' </summary>
''' <param name="delimiterArray">A string array of delimiters</param>
''' <remarks></remarks>
Private Sub ValidateDelimiters(ByVal delimiterArray() As String)
If delimiterArray Is Nothing Then
Return
End If
For Each delimiter As String In delimiterArray
If delimiter = "" Then
Throw GetArgumentExceptionWithArgName("Delimiters", SR.TextFieldParser_DelimiterNothing, "Delimiters")
End If
If delimiter.IndexOfAny(New Char() {Chr(13), Chr(10)}) > -1 Then
Throw GetArgumentExceptionWithArgName("Delimiters", SR.TextFieldParser_EndCharsInDelimiter)
End If
Next
End Sub
''' <summary>
''' Determines if the FieldWidths or Delimiters arrays have changed.
''' </summary>
''' <remarks>If the array has changed, we need to re initialize before reading.</remarks>
Private Function ArrayHasChanged() As Boolean
Dim lowerBound As Integer = 0
Dim upperBound As Integer = 0
Select Case m_TextFieldType
Case FieldType.Delimited
Debug.Assert((m_DelimitersCopy Is Nothing And m_Delimiters Is Nothing) Or (m_DelimitersCopy IsNot Nothing And m_Delimiters IsNot Nothing), "Delimiters and copy are not both Nothing or both not Nothing")
' Check null cases
If m_Delimiters Is Nothing Then
Return False
End If
lowerBound = m_DelimitersCopy.GetLowerBound(0)
upperBound = m_DelimitersCopy.GetUpperBound(0)
For i As Integer = lowerBound To upperBound
If m_Delimiters(i) <> m_DelimitersCopy(i) Then
Return True
End If
Next i
Case FieldType.FixedWidth
Debug.Assert((m_FieldWidthsCopy Is Nothing And m_FieldWidths Is Nothing) Or (m_FieldWidthsCopy IsNot Nothing And m_FieldWidths IsNot Nothing), "FieldWidths and copy are not both Nothing or both not Nothing")
' Check null cases
If m_FieldWidths Is Nothing Then
Return False
End If
lowerBound = m_FieldWidthsCopy.GetLowerBound(0)
upperBound = m_FieldWidthsCopy.GetUpperBound(0)
For i As Integer = lowerBound To upperBound
If m_FieldWidths(i) <> m_FieldWidthsCopy(i) Then
Return True
End If
Next i
Case Else
Debug.Fail("Unknown TextFieldType")
End Select
Return False
End Function
''' <summary>
''' Throws if any of the comment tokens contain whitespace
''' </summary>
''' <param name="tokens">A string array of comment tokens</param>
''' <remarks></remarks>
Private Sub CheckCommentTokensForWhitespace(ByVal tokens() As String)
If tokens Is Nothing Then
Return
End If
For Each token As String In tokens
If m_WhiteSpaceRegEx.IsMatch(token) Then
Throw GetArgumentExceptionWithArgName("CommentTokens", SR.TextFieldParser_WhitespaceInToken)
End If
Next
End Sub
''' <summary>
''' Gets the appropriate regex for finding a field beginning with quotes
''' </summary>
''' <value>The right regex</value>
''' <remarks></remarks>
Private ReadOnly Property BeginQuotesRegex() As Regex
Get
If m_BeginQuotesRegex Is Nothing Then
' Get the pattern
Dim pattern As String = String.Format(CultureInfo.InvariantCulture, BEGINS_WITH_QUOTE, WhitespacePattern)
m_BeginQuotesRegex = New Regex(pattern, REGEX_OPTIONS)
End If
Return m_BeginQuotesRegex
End Get
End Property
''' <summary>
''' Gets the appropriate expression for finding ending quote of a field
''' </summary>
''' <value>The expression</value>
''' <remarks></remarks>
Private ReadOnly Property EndQuotePattern() As String
Get
Return String.Format(CultureInfo.InvariantCulture, ENDING_QUOTE, WhitespacePattern)
End Get
End Property
''' <summary>
''' Returns a string containing all the characters which are whitespace for parsing purposes
''' </summary>
''' <value></value>
''' <remarks></remarks>
Private ReadOnly Property WhitespaceCharacters() As String
Get
Dim builder As New StringBuilder
For Each code As Integer In m_WhitespaceCodes
Dim spaceChar As Char = ChrW(code)
If Not CharacterIsInDelimiter(spaceChar) Then
builder.Append(spaceChar)
End If
Next
Return builder.ToString()
End Get
End Property
''' <summary>
''' Gets the set of whitespace characters to be used in a regex pattern
''' </summary>
''' <value></value>
''' <remarks></remarks>
Private ReadOnly Property WhitespacePattern() As String
Get
Dim builder As New StringBuilder()
For Each code As Integer In m_WhitespaceCodes
Dim spaceChar As Char = ChrW(code)
If Not CharacterIsInDelimiter(spaceChar) Then
' Gives us something like \u00A0
builder.Append("\u" & code.ToString("X4", CultureInfo.InvariantCulture))
End If
Next
Return builder.ToString()
End Get
End Property
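' Illustrative sketch (not part of the original source): the non-breaking space code
' &HA0 becomes the regex fragment "\u00A0", and any whitespace code that also appears
' in a delimiter is left out so that it can still act as a delimiter.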
''' <summary>
''' Checks to see if the passed in character is in any of the delimiters
''' </summary>
''' <param name="testCharacter">The character to look for</param>
''' <returns>True if the character is found in a delimiter, otherwise false</returns>
''' <remarks></remarks>
Private Function CharacterIsInDelimiter(ByVal testCharacter As Char) As Boolean
Debug.Assert(m_Delimiters IsNot Nothing, "No delimiters set!")
For Each delimiter As String In m_Delimiters
If delimiter.IndexOf(testCharacter) > -1 Then
Return True
End If
Next
Return False
End Function
' Indicates reader has been disposed
Private m_Disposed As Boolean
' The internal StreamReader that reads the file
Private m_Reader As TextReader
' An array holding the strings that indicate a line is a comment
Private m_CommentTokens() As String = Array.Empty(Of String)()
' The number of the line last read by either ReadLine or ReadFields
Private m_LineNumber As Long = 1
' Flags whether or not there is data left to read. Assume there is at creation
Private m_EndOfData As Boolean
' Holds the last malformed line
Private m_ErrorLine As String = ""
' Holds the line number of the last malformed line
Private m_ErrorLineNumber As Long = -1
' Indicates what type of fields are in the file (fixed width or delimited)
Private m_TextFieldType As FieldType = FieldType.Delimited
' An array of the widths of the fields in a fixed width file
Private m_FieldWidths() As Integer
' An array of the delimiters used for the fields in the file
Private m_Delimiters() As String
' Holds a copy of the field widths last set so we can respond to changes in the array
Private m_FieldWidthsCopy() As Integer
' Holds a copy of the field widths last set so we can respond to changes in the array
Private m_DelimitersCopy() As String
' Regular expression used to find delimiters
Private m_DelimiterRegex As Regex
' Regex used with BuildField
Private m_DelimiterWithEndCharsRegex As Regex
' Options used for regular expressions
Private Const REGEX_OPTIONS As RegexOptions = RegexOptions.CultureInvariant
' Codes for whitespace as used by String.Trim excluding line end chars as those are handled separately
Private m_WhitespaceCodes() As Integer = {&H9, &HB, &HC, &H20, &H85, &HA0, &H1680, &H2000, &H2001, &H2002, &H2003, &H2004, &H2005, &H2006, &H2007, &H2008, &H2009, &H200A, &H200B, &H2028, &H2029, &H3000, &HFEFF}
' Regular expression used to find beginning quotes ignore spaces and tabs
Private m_BeginQuotesRegex As Regex
' Regular expression for whitespace
Private m_WhiteSpaceRegEx As Regex = New Regex("\s", REGEX_OPTIONS)
' Indicates whether or not white space should be removed from a returned field
Private m_TrimWhiteSpace As Boolean = True
' The position of the cursor in the buffer
Private m_Position As Integer
' The position of the peek cursor
Private m_PeekPosition As Integer
' The number of chars in the buffer
Private m_CharsRead As Integer
' Indicates that the user has changed properties so that we need to validate before a read
Private m_NeedPropertyCheck As Boolean = True
' The default size for the buffer
Private Const DEFAULT_BUFFER_LENGTH As Integer = 4096
' This is a guess as to how much larger the string builder should be beyond the size of what
' we've already read
Private Const DEFAULT_BUILDER_INCREASE As Integer = 10
' Buffer used to hold data read from the file. It holds data that must be read
' ahead of the cursor (for PeekChars and EndOfData)
Private m_Buffer(DEFAULT_BUFFER_LENGTH - 1) As Char
' The minimum length for a valid fixed width line
Private m_LineLength As Integer
' Indicates whether or not we handle quotes in a csv appropriate way
Private m_HasFieldsEnclosedInQuotes As Boolean = True
' A string of the chars that count as spaces (used for csv format). The norm is spaces and tabs.
Private m_SpaceChars As String
' The largest size a line can be.
Private m_MaxLineSize As Integer = 10000000
' The largest size the buffer can be
Private m_MaxBufferSize As Integer = 10000000
' Regex pattern to determine if field begins with quotes
Private Const BEGINS_WITH_QUOTE As String = "\G[{0}]*"""
' Regex pattern to find a quote before a delimiter
Private Const ENDING_QUOTE As String = """[{0}]*"
' Indicates passed in stream should be not be closed
Private m_LeaveOpen As Boolean
End Class
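' Minimal usage sketch (not part of the original source); "data.csv" is a hypothetical
' path, and the member names below reflect the public TextFieldParser API as the
' editor understands it:
'     Using parser As New TextFieldParser("data.csv")
'         parser.TextFieldType = FieldType.Delimited
'         parser.SetDelimiters(",")
'         While Not parser.EndOfData
'             Dim fields As String() = parser.ReadFields()
'         End While
'     End Using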
''' <summary>
''' Enum used to indicate the kind of file being read, either delimited or fixed length
''' </summary>
''' <remarks></remarks>
Public Enum FieldType As Integer
'!!!!!!!!!! Changes to this enum must be reflected in ValidateFieldTypeEnumValue()
Delimited
FixedWidth
End Enum
''' <summary>
''' Helper class that, when passed a line and an index into a quote-delimited field,
''' will build the field and handle escaped quotes
''' </summary>
''' <remarks></remarks>
Friend NotInheritable Class QuoteDelimitedFieldBuilder
''' <summary>
''' Creates an instance of the class and sets some properties
''' </summary>
''' <param name="DelimiterRegex">The regex used to find any of the delimiters</param>
''' <param name="SpaceChars">Characters treated as space (usually space and tab)</param>
''' <remarks></remarks>
Public Sub New(ByVal DelimiterRegex As Regex, ByVal SpaceChars As String)
m_DelimiterRegex = DelimiterRegex
m_SpaceChars = SpaceChars
End Sub
''' <summary>
''' Indicates whether or not the field has been built.
''' </summary>
''' <value>True if the field has been built, otherwise False</value>
''' <remarks>If the Field has been built, the Field property will return the entire field</remarks>
Public ReadOnly Property FieldFinished() As Boolean
Get
Return m_FieldFinished
End Get
End Property
''' <summary>
''' The field being built
''' </summary>
''' <value>The field</value>
''' <remarks></remarks>
Public ReadOnly Property Field() As String
Get
Return m_Field.ToString()
End Get
End Property
''' <summary>
''' The current index on the line. Used to indicate how much of the line was used to build the field
''' </summary>
''' <value>The current position on the line</value>
''' <remarks></remarks>
Public ReadOnly Property Index() As Integer
Get
Return m_Index
End Get
End Property
''' <summary>
''' The length of the closing delimiter if one was found
''' </summary>
''' <value>The length of the delimiter</value>
''' <remarks></remarks>
Public ReadOnly Property DelimiterLength() As Integer
Get
Return m_DelimiterLength
End Get
End Property
''' <summary>
''' Indicates that the current field breaks the subset of csv rules we enforce
''' </summary>
''' <value>True if the line is malformed, otherwise False</value>
''' <remarks>
''' The rules we enforce are:
''' Embedded quotes must be escaped
''' Only space characters can occur between a delimiter and a quote
''' </remarks>
Public ReadOnly Property MalformedLine() As Boolean
Get
Return m_MalformedLine
End Get
End Property
''' <summary>
''' Builds a field by walking through the passed in line starting at StartAt
''' </summary>
''' <param name="Line">The line containing the data</param>
''' <param name="StartAt">The index at which we start building the field</param>
''' <remarks></remarks>
Public Sub BuildField(ByVal Line As String, ByVal StartAt As Integer)
m_Index = StartAt
Dim Length As Integer = Line.Length
While m_Index < Length
If Line(m_Index) = """"c Then
' Are we at the end of the file?
If m_Index + 1 = Length Then
' We've found the end of the field
m_FieldFinished = True
m_DelimiterLength = 1
' Move index past end of line
m_Index += 1
Return
End If
' Check to see if this is an escaped quote
If m_Index + 1 < Line.Length And Line(m_Index + 1) = """"c Then
m_Field.Append(""""c)
m_Index += 2
Continue While
End If
' Find the next delimiter and make sure everything between the quote and
' the delimiter is ignorable
Dim Limit As Integer
Dim DelimiterMatch As Match = m_DelimiterRegex.Match(Line, m_Index + 1)
If Not DelimiterMatch.Success Then
Limit = Length - 1
Else
Limit = DelimiterMatch.Index - 1
End If
For i As Integer = m_Index + 1 To Limit
If m_SpaceChars.IndexOf(Line(i)) < 0 Then
m_MalformedLine = True
Return
End If
Next
' The length of the delimiter is the length of the closing quote (1) + any spaces + the length
' of the delimiter we matched if any
m_DelimiterLength = 1 + Limit - m_Index
If DelimiterMatch.Success Then
m_DelimiterLength += DelimiterMatch.Length
End If
m_FieldFinished = True
Return
Else
m_Field.Append(Line(m_Index))
m_Index += 1
End If
End While
End Sub
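' Illustrative sketch (not part of the original source), assuming "," is a delimiter:
' for the fragment ab""cd",next (the caller has already consumed the opening quote),
' BuildField accumulates ab"cd (the doubled quote collapses to a single embedded
' quote), marks the field finished at the closing quote, and reports a
' DelimiterLength spanning that quote plus the trailing comma.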
' String builder holding the field
Private m_Field As New StringBuilder
' Indicates m_Field contains the entire field
Private m_FieldFinished As Boolean
' The current index on the field
Private m_Index As Integer
' The length of the closing delimiter if one is found
Private m_DelimiterLength As Integer
' The regular expression used to find the next delimiter
Private m_DelimiterRegex As Regex
' Chars that should be counted as space (and hence ignored if occurring before or after a delimiter)
Private m_SpaceChars As String
' Indicates the line breaks the csv rules we enforce
Private m_MalformedLine As Boolean
End Class
End Namespace
./src/mono/mono/tests/bug-36848.cs
// Load an interface from an invalid DLL and ensure the failure is clean.
// Notice this is very similar to bug-81673, except the interface is loaded
// through a transparent proxy instead of directly.
using System;
using System.Runtime.Remoting;
using System.Runtime.Remoting.Proxies;
using System.Runtime.Remoting.Messaging;
namespace Application
{
public class App
{
public static void Test ()
{
RemoteProxy remote2 = new RemoteProxy (typeof(App).Assembly.GetType("Application.Remote"));
remote2.GetTransparentProxy ();
}
public static int Main ()
{
int numCaught = 0;
for (int i = 0; i < 10; ++i) {
try {
Test ();
} catch (Exception) {
++numCaught;
}
}
if (numCaught == 10)
return 0;
return 1;
}
}
class Remote : MarshalByRefObject, IMyInterface {
public void Run ()
{
}
}
class RemoteProxy : RealProxy {
public RemoteProxy (Type t) : base (t) {
}
public override IMessage Invoke (IMessage request) {
return null;
}
}
}
./src/libraries/Common/src/Interop/Windows/Kernel32/Interop.SetThreadErrorMode.cs
// Licensed to the .NET Foundation under one or more agreements.
// The .NET Foundation licenses this file to you under the MIT license.
using System.Runtime.InteropServices;
internal static partial class Interop
{
internal static partial class Kernel32
{
[SuppressGCTransition]
[GeneratedDllImport(Libraries.Kernel32, SetLastError = true)]
[return: MarshalAs(UnmanagedType.Bool)]
internal static partial bool SetThreadErrorMode(
uint dwNewMode,
out uint lpOldMode);
internal const uint SEM_FAILCRITICALERRORS = 1;
}
}
./src/libraries/System.Linq.Expressions/src/System/Dynamic/Utils/TypeUtils.cs
// Licensed to the .NET Foundation under one or more agreements.
// The .NET Foundation licenses this file to you under the MIT license.
using System.Diagnostics;
using System.Diagnostics.CodeAnalysis;
using System.Linq;
using System.Linq.Expressions;
using System.Reflection;
namespace System.Dynamic.Utils
{
internal static class TypeUtils
{
private static readonly Type[] s_arrayAssignableInterfaces = typeof(int[]).GetInterfaces()
.Where(i => i.IsGenericType)
.Select(i => i.GetGenericTypeDefinition())
.ToArray();
private static readonly ConstructorInfo s_nullableConstructor = typeof(Nullable<>).GetConstructor(typeof(Nullable<>).GetGenericArguments())!;
public static Type GetNonNullableType(this Type type) => IsNullableType(type) ? type.GetGenericArguments()[0] : type;
public static Type GetNullableType(this Type type)
{
Debug.Assert(type != null, "type cannot be null");
if (type.IsValueType && !IsNullableType(type))
{
return typeof(Nullable<>).MakeGenericType(type);
}
return type;
}
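        // Illustrative sketch (not part of the original source):
        // GetNullableType(typeof(int)) returns typeof(int?), while
        // GetNullableType(typeof(string)) and GetNullableType(typeof(int?))
        // return their input unchanged.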
public static ConstructorInfo GetNullableConstructor(Type nullableType)
{
Debug.Assert(nullableType.IsNullableType());
return (ConstructorInfo)nullableType.GetMemberWithSameMetadataDefinitionAs(s_nullableConstructor);
}
public static bool IsNullableType(this Type type) => type.IsConstructedGenericType && type.GetGenericTypeDefinition() == typeof(Nullable<>);
public static bool IsNullableOrReferenceType(this Type type) => !type.IsValueType || IsNullableType(type);
public static bool IsBool(this Type type) => GetNonNullableType(type) == typeof(bool);
public static bool IsNumeric(this Type type)
{
type = GetNonNullableType(type);
if (!type.IsEnum)
{
switch (type.GetTypeCode())
{
case TypeCode.Char:
case TypeCode.SByte:
case TypeCode.Byte:
case TypeCode.Int16:
case TypeCode.Int32:
case TypeCode.Int64:
case TypeCode.Double:
case TypeCode.Single:
case TypeCode.UInt16:
case TypeCode.UInt32:
case TypeCode.UInt64:
return true;
}
}
return false;
}
public static bool IsInteger(this Type type)
{
type = GetNonNullableType(type);
if (!type.IsEnum)
{
switch (type.GetTypeCode())
{
case TypeCode.Byte:
case TypeCode.SByte:
case TypeCode.Int16:
case TypeCode.Int32:
case TypeCode.Int64:
case TypeCode.UInt16:
case TypeCode.UInt32:
case TypeCode.UInt64:
return true;
}
}
return false;
}
public static bool IsInteger64(this Type type)
{
type = GetNonNullableType(type);
if (!type.IsEnum)
{
switch (type.GetTypeCode())
{
case TypeCode.Int64:
case TypeCode.UInt64:
return true;
}
}
return false;
}
public static bool IsArithmetic(this Type type)
{
type = GetNonNullableType(type);
if (!type.IsEnum)
{
switch (type.GetTypeCode())
{
case TypeCode.Int16:
case TypeCode.Int32:
case TypeCode.Int64:
case TypeCode.Double:
case TypeCode.Single:
case TypeCode.UInt16:
case TypeCode.UInt32:
case TypeCode.UInt64:
return true;
}
}
return false;
}
public static bool IsUnsignedInt(this Type type)
{
type = GetNonNullableType(type);
if (!type.IsEnum)
{
switch (type.GetTypeCode())
{
case TypeCode.UInt16:
case TypeCode.UInt32:
case TypeCode.UInt64:
return true;
}
}
return false;
}
public static bool IsIntegerOrBool(this Type type)
{
type = GetNonNullableType(type);
if (!type.IsEnum)
{
switch (type.GetTypeCode())
{
case TypeCode.Int64:
case TypeCode.Int32:
case TypeCode.Int16:
case TypeCode.UInt64:
case TypeCode.UInt32:
case TypeCode.UInt16:
case TypeCode.Boolean:
case TypeCode.SByte:
case TypeCode.Byte:
return true;
}
}
return false;
}
public static bool IsNumericOrBool(this Type type) => IsNumeric(type) || IsBool(type);
// Checks if the type is a valid target for an instance call
public static bool IsValidInstanceType(MemberInfo member, Type instanceType)
{
Type? targetType = member.DeclaringType;
if (targetType == null)
{
return false;
}
if (AreReferenceAssignable(targetType, instanceType))
{
return true;
}
if (instanceType.IsValueType)
{
if (AreReferenceAssignable(targetType, typeof(object)))
{
return true;
}
if (AreReferenceAssignable(targetType, typeof(ValueType)))
{
return true;
}
if (instanceType.IsEnum && AreReferenceAssignable(targetType, typeof(Enum)))
{
return true;
}
// A call to an interface implemented by a struct is legal whether the struct has
// been boxed or not.
if (targetType.IsInterface)
{
[UnconditionalSuppressMessage("ReflectionAnalysis", "IL2070:UnrecognizedReflectionPattern",
Justification = "The targetType must be preserved (since we have an instance of it here)," +
"So if it's an interface that interface will be preserved everywhere" +
"So if it was implemented by the instanceType, it will be kept even after trimming." +
"The fact that GetInterfaces may return fewer interfaces doesn't matter as long" +
"as it returns the one we're looking for.")]
static Type[] GetTypeInterfaces(Type instanceType) => instanceType.GetInterfaces();
foreach (Type interfaceType in GetTypeInterfaces(instanceType))
{
if (AreReferenceAssignable(targetType, interfaceType))
{
return true;
}
}
}
}
return false;
}
public static bool HasIdentityPrimitiveOrNullableConversionTo(this Type source, Type dest)
{
Debug.Assert(source != null && dest != null);
// Identity conversion
if (AreEquivalent(source, dest))
{
return true;
}
// Nullable conversions
if (IsNullableType(source) && AreEquivalent(dest, GetNonNullableType(source)))
{
return true;
}
if (IsNullableType(dest) && AreEquivalent(source, GetNonNullableType(dest)))
{
return true;
}
// Primitive runtime conversions
// All conversions amongst enum, bool, char, integer and float types
// (and their corresponding nullable types) are legal except for
// nonbool==>bool and nonbool==>bool? which are only legal from
// bool-backed enums.
return IsConvertible(source) && IsConvertible(dest)
&& (GetNonNullableType(dest) != typeof(bool)
|| source.IsEnum && source.GetEnumUnderlyingType() == typeof(bool));
}
public static bool HasReferenceConversionTo(this Type source, Type dest)
{
Debug.Assert(source != null && dest != null);
// void -> void conversion is handled elsewhere
// (it's an identity conversion)
// All other void conversions are disallowed.
if (source == typeof(void) || dest == typeof(void))
{
return false;
}
Type nnSourceType = GetNonNullableType(source);
Type nnDestType = GetNonNullableType(dest);
// Down conversion
if (nnSourceType.IsAssignableFrom(nnDestType))
{
return true;
}
// Up conversion
if (nnDestType.IsAssignableFrom(nnSourceType))
{
return true;
}
// Interface conversion
if (source.IsInterface || dest.IsInterface)
{
return true;
}
// Variant delegate conversion
if (IsLegalExplicitVariantDelegateConversion(source, dest))
{
return true;
}
// Object conversion handled by assignable above.
Debug.Assert(source != typeof(object) && dest != typeof(object));
return (source.IsArray || dest.IsArray) && StrictHasReferenceConversionTo(source, dest, true);
}
private static bool StrictHasReferenceConversionTo(this Type source, Type dest, bool skipNonArray)
{
// HasReferenceConversionTo was both too strict and too lax. It was too strict in prohibiting
// some valid conversions involving arrays, and too lax in allowing casts between interfaces
// and sealed classes that don't implement them. Unfortunately fixing the lax cases would be
// a breaking change, especially since such expressions will even work if only given null
// arguments.
// This method catches the cases that were incorrectly disallowed, but when it needs to
// examine possible conversions of element or type parameters it applies stricter rules.
while (true)
{
if (!skipNonArray) // Skip if we just came from HasReferenceConversionTo and have just tested these
{
if (source.IsValueType | dest.IsValueType)
{
return false;
}
// Includes the case of either being typeof(object)
if (source.IsAssignableFrom(dest) || dest.IsAssignableFrom(source))
{
return true;
}
if (source.IsInterface)
{
if (dest.IsInterface || dest.IsClass && !dest.IsSealed)
{
return true;
}
}
else if (dest.IsInterface)
{
if (source.IsClass && !source.IsSealed)
{
return true;
}
}
}
if (source.IsArray)
{
if (dest.IsArray)
{
if (source.GetArrayRank() != dest.GetArrayRank() || source.IsSZArray != dest.IsSZArray)
{
return false;
}
source = source.GetElementType()!;
dest = dest.GetElementType()!;
skipNonArray = false;
}
else
{
return HasArrayToInterfaceConversion(source, dest);
}
}
else if (dest.IsArray)
{
if (HasInterfaceToArrayConversion(source, dest))
{
return true;
}
return IsImplicitReferenceConversion(typeof(Array), source);
}
else
{
return IsLegalExplicitVariantDelegateConversion(source, dest);
}
}
}
private static bool HasArrayToInterfaceConversion(Type source, Type dest)
{
Debug.Assert(source.IsArray);
if (!source.IsSZArray || !dest.IsInterface || !dest.IsGenericType)
{
return false;
}
Type[] destParams = dest.GetGenericArguments();
if (destParams.Length != 1)
{
return false;
}
Type destGen = dest.GetGenericTypeDefinition();
foreach (Type iface in s_arrayAssignableInterfaces)
{
if (AreEquivalent(destGen, iface))
{
return StrictHasReferenceConversionTo(source.GetElementType()!, destParams[0], false);
}
}
return false;
}
private static bool HasInterfaceToArrayConversion(Type source, Type dest)
{
Debug.Assert(dest.IsSZArray);
if (!dest.IsSZArray || !source.IsInterface || !source.IsGenericType)
{
return false;
}
Type[] sourceParams = source.GetGenericArguments();
if (sourceParams.Length != 1)
{
return false;
}
Type sourceGen = source.GetGenericTypeDefinition();
foreach (Type iface in s_arrayAssignableInterfaces)
{
if (AreEquivalent(sourceGen, iface))
{
return StrictHasReferenceConversionTo(sourceParams[0], dest.GetElementType()!, false);
}
}
return false;
}
private static bool IsCovariant(Type t)
{
Debug.Assert(t != null);
return 0 != (t.GenericParameterAttributes & GenericParameterAttributes.Covariant);
}
private static bool IsContravariant(Type t)
{
Debug.Assert(t != null);
return 0 != (t.GenericParameterAttributes & GenericParameterAttributes.Contravariant);
}
private static bool IsInvariant(Type t)
{
Debug.Assert(t != null);
return 0 == (t.GenericParameterAttributes & GenericParameterAttributes.VarianceMask);
}
private static bool IsDelegate(Type t)
{
Debug.Assert(t != null);
return t.IsSubclassOf(typeof(MulticastDelegate));
}
public static bool IsLegalExplicitVariantDelegateConversion(Type source, Type dest)
{
Debug.Assert(source != null && dest != null);
// There *might* be a legal conversion from a generic delegate type S to generic delegate type T,
// provided all of the follow are true:
// o Both types are constructed generic types of the same generic delegate type, D<X1,... Xk>.
// That is, S = D<S1...>, T = D<T1...>.
// o If type parameter Xi is declared to be invariant then Si must be identical to Ti.
// o If type parameter Xi is declared to be covariant ("out") then Si must be convertible
// to Ti via an identity conversion, implicit reference conversion, or explicit reference conversion.
// o If type parameter Xi is declared to be contravariant ("in") then either Si must be identical to Ti,
// or Si and Ti must both be reference types.
if (!IsDelegate(source) || !IsDelegate(dest) || !source.IsGenericType || !dest.IsGenericType)
{
return false;
}
Type genericDelegate = source.GetGenericTypeDefinition();
if (dest.GetGenericTypeDefinition() != genericDelegate)
{
return false;
}
Type[] genericParameters = genericDelegate.GetGenericArguments();
Type[] sourceArguments = source.GetGenericArguments();
Type[] destArguments = dest.GetGenericArguments();
Debug.Assert(genericParameters != null);
Debug.Assert(sourceArguments != null);
Debug.Assert(destArguments != null);
Debug.Assert(genericParameters.Length == sourceArguments.Length);
Debug.Assert(genericParameters.Length == destArguments.Length);
for (int iParam = 0; iParam < genericParameters.Length; ++iParam)
{
Type sourceArgument = sourceArguments[iParam];
Type destArgument = destArguments[iParam];
Debug.Assert(sourceArgument != null && destArgument != null);
// If the arguments are identical then this one is automatically good, so skip it.
if (AreEquivalent(sourceArgument, destArgument))
{
continue;
}
Type genericParameter = genericParameters[iParam];
Debug.Assert(genericParameter != null);
if (IsInvariant(genericParameter))
{
return false;
}
if (IsCovariant(genericParameter))
{
if (!sourceArgument.HasReferenceConversionTo(destArgument))
{
return false;
}
}
else if (IsContravariant(genericParameter) && (sourceArgument.IsValueType || destArgument.IsValueType))
{
return false;
}
}
return true;
}
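        // Illustrative sketch (not part of the original source): because Func<TResult>
        // is declared with an "out" (covariant) type parameter, a conversion between
        // Func<string> and Func<object> satisfies the covariance rule above, whereas
        // Action<int> -> Action<long> fails because the contravariant parameter types
        // are value types.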
public static bool IsConvertible(this Type type)
{
type = GetNonNullableType(type);
if (type.IsEnum)
{
return true;
}
switch (type.GetTypeCode())
{
case TypeCode.Boolean:
case TypeCode.Byte:
case TypeCode.SByte:
case TypeCode.Int16:
case TypeCode.Int32:
case TypeCode.Int64:
case TypeCode.UInt16:
case TypeCode.UInt32:
case TypeCode.UInt64:
case TypeCode.Single:
case TypeCode.Double:
case TypeCode.Char:
return true;
default:
return false;
}
}
public static bool HasReferenceEquality(Type left, Type right)
{
if (left.IsValueType || right.IsValueType)
{
return false;
}
// If we have an interface and a reference type then we can do
// reference equality.
// If we have two reference types and one is assignable to the
// other then we can do reference equality.
return left.IsInterface || right.IsInterface || AreReferenceAssignable(left, right)
|| AreReferenceAssignable(right, left);
}
public static bool HasBuiltInEqualityOperator(Type left, Type right)
{
// If we have an interface and a reference type then we can do
// reference equality.
if (left.IsInterface && !right.IsValueType)
{
return true;
}
if (right.IsInterface && !left.IsValueType)
{
return true;
}
// If we have two reference types and one is assignable to the
// other then we can do reference equality.
if (!left.IsValueType && !right.IsValueType)
{
if (AreReferenceAssignable(left, right) || AreReferenceAssignable(right, left))
{
return true;
}
}
// Otherwise, if the types are not the same then we definitely
// do not have a built-in equality operator.
if (!AreEquivalent(left, right))
{
return false;
}
// We have two identical value types, modulo nullability. (If they were both the
// same reference type then we would have returned true earlier.)
Debug.Assert(left.IsValueType);
// Equality between struct types is only defined for numerics, bools, enums,
// and their nullable equivalents.
Type nnType = GetNonNullableType(left);
return nnType == typeof(bool) || IsNumeric(nnType) || nnType.IsEnum;
}
public static bool IsImplicitlyConvertibleTo(this Type source, Type destination) =>
AreEquivalent(source, destination) // identity conversion
|| IsImplicitNumericConversion(source, destination)
|| IsImplicitReferenceConversion(source, destination)
|| IsImplicitBoxingConversion(source, destination)
|| IsImplicitNullableConversion(source, destination);
[UnconditionalSuppressMessage("ReflectionAnalysis", "IL2075:UnrecognizedReflectionPattern",
Justification = "The trimmer doesn't remove operators when System.Linq.Expressions is used. See https://github.com/mono/linker/pull/2125.")]
public static MethodInfo? GetUserDefinedCoercionMethod(Type convertFrom, Type convertToType)
{
Type nnExprType = GetNonNullableType(convertFrom);
Type nnConvType = GetNonNullableType(convertToType);
// try exact match on types
MethodInfo[] eMethods = nnExprType.GetMethods(BindingFlags.Static | BindingFlags.Public | BindingFlags.NonPublic);
MethodInfo? method = FindConversionOperator(eMethods, convertFrom, convertToType);
if (method != null)
{
return method;
}
MethodInfo[] cMethods = nnConvType.GetMethods(BindingFlags.Static | BindingFlags.Public | BindingFlags.NonPublic);
method = FindConversionOperator(cMethods, convertFrom, convertToType);
if (method != null)
{
return method;
}
if (AreEquivalent(nnExprType, convertFrom) && AreEquivalent(nnConvType, convertToType))
{
return null;
}
// try lifted conversion
return FindConversionOperator(eMethods, nnExprType, nnConvType)
?? FindConversionOperator(cMethods, nnExprType, nnConvType)
?? FindConversionOperator(eMethods, nnExprType, convertToType)
?? FindConversionOperator(cMethods, nnExprType, convertToType);
}
private static MethodInfo? FindConversionOperator(MethodInfo[] methods, Type? typeFrom, Type? typeTo)
{
foreach (MethodInfo mi in methods)
{
if ((mi.Name == "op_Implicit" || mi.Name == "op_Explicit") && AreEquivalent(mi.ReturnType, typeTo))
{
ParameterInfo[] pis = mi.GetParametersCached();
if (pis.Length == 1 && AreEquivalent(pis[0].ParameterType, typeFrom))
{
return mi;
}
}
}
return null;
}
private static bool IsImplicitNumericConversion(Type source, Type destination)
{
TypeCode tcSource = source.GetTypeCode();
TypeCode tcDest = destination.GetTypeCode();
switch (tcSource)
{
case TypeCode.SByte:
switch (tcDest)
{
case TypeCode.Int16:
case TypeCode.Int32:
case TypeCode.Int64:
case TypeCode.Single:
case TypeCode.Double:
case TypeCode.Decimal:
return true;
}
break;
case TypeCode.Byte:
switch (tcDest)
{
case TypeCode.Int16:
case TypeCode.UInt16:
case TypeCode.Int32:
case TypeCode.UInt32:
case TypeCode.Int64:
case TypeCode.UInt64:
case TypeCode.Single:
case TypeCode.Double:
case TypeCode.Decimal:
return true;
}
break;
case TypeCode.Int16:
switch (tcDest)
{
case TypeCode.Int32:
case TypeCode.Int64:
case TypeCode.Single:
case TypeCode.Double:
case TypeCode.Decimal:
return true;
}
break;
case TypeCode.UInt16:
switch (tcDest)
{
case TypeCode.Int32:
case TypeCode.UInt32:
case TypeCode.Int64:
case TypeCode.UInt64:
case TypeCode.Single:
case TypeCode.Double:
case TypeCode.Decimal:
return true;
}
break;
case TypeCode.Int32:
switch (tcDest)
{
case TypeCode.Int64:
case TypeCode.Single:
case TypeCode.Double:
case TypeCode.Decimal:
return true;
}
break;
case TypeCode.UInt32:
switch (tcDest)
{
case TypeCode.Int64:
case TypeCode.UInt64:
case TypeCode.Single:
case TypeCode.Double:
case TypeCode.Decimal:
return true;
}
break;
case TypeCode.Int64:
case TypeCode.UInt64:
switch (tcDest)
{
case TypeCode.Single:
case TypeCode.Double:
case TypeCode.Decimal:
return true;
}
break;
case TypeCode.Char:
switch (tcDest)
{
case TypeCode.UInt16:
case TypeCode.Int32:
case TypeCode.UInt32:
case TypeCode.Int64:
case TypeCode.UInt64:
case TypeCode.Single:
case TypeCode.Double:
case TypeCode.Decimal:
return true;
}
break;
case TypeCode.Single:
return tcDest == TypeCode.Double;
}
return false;
}
private static bool IsImplicitReferenceConversion(Type source, Type destination) =>
destination.IsAssignableFrom(source);
private static bool IsImplicitBoxingConversion(Type source, Type destination) =>
source.IsValueType && (destination == typeof(object) || destination == typeof(ValueType)) || source.IsEnum && destination == typeof(Enum);
private static bool IsImplicitNullableConversion(Type source, Type destination) =>
IsNullableType(destination) && IsImplicitlyConvertibleTo(GetNonNullableType(source), GetNonNullableType(destination));
public static Type? FindGenericType(Type definition, Type? type)
{
// For now this helper doesn't support interfaces
Debug.Assert(!definition.IsInterface);
while (type is not null && type != typeof(object))
{
if (type.IsConstructedGenericType && AreEquivalent(type.GetGenericTypeDefinition(), definition))
{
return type;
}
type = type.BaseType;
}
return null;
}
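        // Illustrative sketch (not part of the original source): given
        // class MyList : List<int> { }, FindGenericType(typeof(List<>), typeof(MyList))
        // walks the base-class chain and returns typeof(List<int>); it returns null if
        // the definition is never found before reaching object.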
/// <summary>
/// Searches for an operator method on the type. The method must have
/// the specified signature, no generic arguments, and have the
/// SpecialName bit set. Also searches inherited operator methods.
///
/// NOTE: This was designed to satisfy the needs of op_True and
/// op_False, because we have to do runtime lookup for those. It may
/// not work right for unary operators in general.
/// </summary>
[UnconditionalSuppressMessage("ReflectionAnalysis", "IL2067:UnrecognizedReflectionPattern",
Justification = "The trimmer doesn't remove operators when System.Linq.Expressions is used. See https://github.com/mono/linker/pull/2125.")]
public static MethodInfo? GetBooleanOperator(Type type, string name)
{
Debug.Assert(name == "op_False" || name == "op_True");
do
{
MethodInfo? result = type.GetAnyStaticMethodValidated(name, new[] { type });
if (result != null && result.IsSpecialName && !result.ContainsGenericParameters)
{
return result;
}
type = type.BaseType!;
} while (type != null);
return null;
}
public static Type GetNonRefType(this Type type) => type.IsByRef ? type.GetElementType()! : type;
public static bool AreEquivalent(Type? t1, Type? t2) => t1 != null && t1.IsEquivalentTo(t2);
public static bool AreReferenceAssignable(Type dest, Type src)
{
// This actually implements "Is this identity assignable and/or reference assignable?"
if (AreEquivalent(dest, src))
{
return true;
}
return !dest.IsValueType && !src.IsValueType && dest.IsAssignableFrom(src);
}
public static bool IsSameOrSubclass(Type type, Type subType) =>
AreEquivalent(type, subType) || subType.IsSubclassOf(type);
public static void ValidateType(Type type, string? paramName) => ValidateType(type, paramName, false, false);
public static void ValidateType(Type type, string? paramName, bool allowByRef, bool allowPointer)
{
if (ValidateType(type, paramName, -1))
{
if (!allowByRef && type.IsByRef)
{
throw Error.TypeMustNotBeByRef(paramName);
}
if (!allowPointer && type.IsPointer)
{
throw Error.TypeMustNotBePointer(paramName);
}
}
}
public static bool ValidateType(Type type, string? paramName, int index)
{
if (type == typeof(void))
{
return false; // Caller can skip further checks.
}
if (type.ContainsGenericParameters)
{
throw type.IsGenericTypeDefinition
? Error.TypeIsGeneric(type, paramName, index)
: Error.TypeContainsGenericParameters(type, paramName, index);
}
return true;
}
[UnconditionalSuppressMessage("ReflectionAnalysis", "IL2070:UnrecognizedReflectionPattern",
Justification = "The trimmer will never remove the Invoke method from delegates.")]
public static MethodInfo GetInvokeMethod(this Type delegateType)
{
Debug.Assert(typeof(Delegate).IsAssignableFrom(delegateType));
return delegateType.GetMethod("Invoke", BindingFlags.Instance | BindingFlags.Public | BindingFlags.NonPublic)!;
}
internal static bool IsUnsigned(this Type type) => IsUnsigned(GetNonNullableType(type).GetTypeCode());
internal static bool IsUnsigned(this TypeCode typeCode)
{
switch (typeCode)
{
case TypeCode.Byte:
case TypeCode.UInt16:
case TypeCode.Char:
case TypeCode.UInt32:
case TypeCode.UInt64:
return true;
default:
return false;
}
}
internal static bool IsFloatingPoint(this Type type) => IsFloatingPoint(GetNonNullableType(type).GetTypeCode());
internal static bool IsFloatingPoint(this TypeCode typeCode)
{
switch (typeCode)
{
case TypeCode.Single:
case TypeCode.Double:
return true;
default:
return false;
}
}
[UnconditionalSuppressMessage("ReflectionAnalysis", "IL2070:UnrecognizedReflectionPattern",
Justification = "The Array 'Get' method is dynamically constructed and is not included in IL. It is not subject to trimming.")]
public static MethodInfo GetArrayGetMethod(Type arrayType)
{
Debug.Assert(arrayType.IsArray);
return arrayType.GetMethod("Get", BindingFlags.Public | BindingFlags.Instance)!;
}
[UnconditionalSuppressMessage("ReflectionAnalysis", "IL2070:UnrecognizedReflectionPattern",
Justification = "The Array 'Set' method is dynamically constructed and is not included in IL. It is not subject to trimming.")]
public static MethodInfo GetArraySetMethod(Type arrayType)
{
Debug.Assert(arrayType.IsArray);
return arrayType.GetMethod("Set", BindingFlags.Public | BindingFlags.Instance)!;
}
[UnconditionalSuppressMessage("ReflectionAnalysis", "IL2070:UnrecognizedReflectionPattern",
Justification = "The Array 'Address' method is dynamically constructed and is not included in IL. It is not subject to trimming.")]
public static MethodInfo GetArrayAddressMethod(Type arrayType)
{
Debug.Assert(arrayType.IsArray);
return arrayType.GetMethod("Address", BindingFlags.Public | BindingFlags.Instance)!;
}
}
}
| // Licensed to the .NET Foundation under one or more agreements.
// The .NET Foundation licenses this file to you under the MIT license.
using System.Diagnostics;
using System.Diagnostics.CodeAnalysis;
using System.Linq;
using System.Linq.Expressions;
using System.Reflection;
namespace System.Dynamic.Utils
{
internal static class TypeUtils
{
private static readonly Type[] s_arrayAssignableInterfaces = typeof(int[]).GetInterfaces()
.Where(i => i.IsGenericType)
.Select(i => i.GetGenericTypeDefinition())
.ToArray();
private static readonly ConstructorInfo s_nullableConstructor = typeof(Nullable<>).GetConstructor(typeof(Nullable<>).GetGenericArguments())!;
public static Type GetNonNullableType(this Type type) => IsNullableType(type) ? type.GetGenericArguments()[0] : type;
public static Type GetNullableType(this Type type)
{
Debug.Assert(type != null, "type cannot be null");
if (type.IsValueType && !IsNullableType(type))
{
return typeof(Nullable<>).MakeGenericType(type);
}
return type;
}
public static ConstructorInfo GetNullableConstructor(Type nullableType)
{
Debug.Assert(nullableType.IsNullableType());
return (ConstructorInfo)nullableType.GetMemberWithSameMetadataDefinitionAs(s_nullableConstructor);
}
public static bool IsNullableType(this Type type) => type.IsConstructedGenericType && type.GetGenericTypeDefinition() == typeof(Nullable<>);
public static bool IsNullableOrReferenceType(this Type type) => !type.IsValueType || IsNullableType(type);
public static bool IsBool(this Type type) => GetNonNullableType(type) == typeof(bool);
public static bool IsNumeric(this Type type)
{
type = GetNonNullableType(type);
if (!type.IsEnum)
{
switch (type.GetTypeCode())
{
case TypeCode.Char:
case TypeCode.SByte:
case TypeCode.Byte:
case TypeCode.Int16:
case TypeCode.Int32:
case TypeCode.Int64:
case TypeCode.Double:
case TypeCode.Single:
case TypeCode.UInt16:
case TypeCode.UInt32:
case TypeCode.UInt64:
return true;
}
}
return false;
}
public static bool IsInteger(this Type type)
{
type = GetNonNullableType(type);
if (!type.IsEnum)
{
switch (type.GetTypeCode())
{
case TypeCode.Byte:
case TypeCode.SByte:
case TypeCode.Int16:
case TypeCode.Int32:
case TypeCode.Int64:
case TypeCode.UInt16:
case TypeCode.UInt32:
case TypeCode.UInt64:
return true;
}
}
return false;
}
public static bool IsInteger64(this Type type)
{
type = GetNonNullableType(type);
if (!type.IsEnum)
{
switch (type.GetTypeCode())
{
case TypeCode.Int64:
case TypeCode.UInt64:
return true;
}
}
return false;
}
public static bool IsArithmetic(this Type type)
{
type = GetNonNullableType(type);
if (!type.IsEnum)
{
switch (type.GetTypeCode())
{
case TypeCode.Int16:
case TypeCode.Int32:
case TypeCode.Int64:
case TypeCode.Double:
case TypeCode.Single:
case TypeCode.UInt16:
case TypeCode.UInt32:
case TypeCode.UInt64:
return true;
}
}
return false;
}
public static bool IsUnsignedInt(this Type type)
{
type = GetNonNullableType(type);
if (!type.IsEnum)
{
switch (type.GetTypeCode())
{
case TypeCode.UInt16:
case TypeCode.UInt32:
case TypeCode.UInt64:
return true;
}
}
return false;
}
public static bool IsIntegerOrBool(this Type type)
{
type = GetNonNullableType(type);
if (!type.IsEnum)
{
switch (type.GetTypeCode())
{
case TypeCode.Int64:
case TypeCode.Int32:
case TypeCode.Int16:
case TypeCode.UInt64:
case TypeCode.UInt32:
case TypeCode.UInt16:
case TypeCode.Boolean:
case TypeCode.SByte:
case TypeCode.Byte:
return true;
}
}
return false;
}
public static bool IsNumericOrBool(this Type type) => IsNumeric(type) || IsBool(type);
// Checks if the type is a valid target for an instance call
public static bool IsValidInstanceType(MemberInfo member, Type instanceType)
{
Type? targetType = member.DeclaringType;
if (targetType == null)
{
return false;
}
if (AreReferenceAssignable(targetType, instanceType))
{
return true;
}
if (instanceType.IsValueType)
{
if (AreReferenceAssignable(targetType, typeof(object)))
{
return true;
}
if (AreReferenceAssignable(targetType, typeof(ValueType)))
{
return true;
}
if (instanceType.IsEnum && AreReferenceAssignable(targetType, typeof(Enum)))
{
return true;
}
// A call to an interface implemented by a struct is legal whether the struct has
// been boxed or not.
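// Illustrative example (assumed, not from the original comments): for an int instance and a
// member declared on IComparable<int>, targetType is that interface, so the loop below finds
// IComparable<int> among int's implemented interfaces and the call is considered valid.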
if (targetType.IsInterface)
{
[UnconditionalSuppressMessage("ReflectionAnalysis", "IL2070:UnrecognizedReflectionPattern",
Justification = "The targetType must be preserved (since we have an instance of it here)," +
"So if it's an interface that interface will be preserved everywhere" +
"So if it was implemented by the instanceType, it will be kept even after trimming." +
"The fact that GetInterfaces may return fewer interfaces doesn't matter as long" +
"as it returns the one we're looking for.")]
static Type[] GetTypeInterfaces(Type instanceType) => instanceType.GetInterfaces();
foreach (Type interfaceType in GetTypeInterfaces(instanceType))
{
if (AreReferenceAssignable(targetType, interfaceType))
{
return true;
}
}
}
}
return false;
}
public static bool HasIdentityPrimitiveOrNullableConversionTo(this Type source, Type dest)
{
Debug.Assert(source != null && dest != null);
// Identity conversion
if (AreEquivalent(source, dest))
{
return true;
}
// Nullable conversions
if (IsNullableType(source) && AreEquivalent(dest, GetNonNullableType(source)))
{
return true;
}
if (IsNullableType(dest) && AreEquivalent(source, GetNonNullableType(dest)))
{
return true;
}
// Primitive runtime conversions
// All conversions amongst enum, bool, char, integer and float types
// (and their corresponding nullable types) are legal except for
// nonbool==>bool and nonbool==>bool? which are only legal from
// bool-backed enums.
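// Illustrative examples (assumed): int -> long?, char -> int, and an enum -> int all pass the
// IsConvertible checks below, while int -> bool fails the bool-specific guard because int is
// not an enum with a bool underlying type.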
return IsConvertible(source) && IsConvertible(dest)
&& (GetNonNullableType(dest) != typeof(bool)
|| source.IsEnum && source.GetEnumUnderlyingType() == typeof(bool));
}
public static bool HasReferenceConversionTo(this Type source, Type dest)
{
Debug.Assert(source != null && dest != null);
// void -> void conversion is handled elsewhere
// (it's an identity conversion)
// All other void conversions are disallowed.
if (source == typeof(void) || dest == typeof(void))
{
return false;
}
Type nnSourceType = GetNonNullableType(source);
Type nnDestType = GetNonNullableType(dest);
// Down conversion
if (nnSourceType.IsAssignableFrom(nnDestType))
{
return true;
}
// Up conversion
if (nnDestType.IsAssignableFrom(nnSourceType))
{
return true;
}
// Interface conversion
if (source.IsInterface || dest.IsInterface)
{
return true;
}
// Variant delegate conversion
if (IsLegalExplicitVariantDelegateConversion(source, dest))
{
return true;
}
// Object conversion handled by assignable above.
Debug.Assert(source != typeof(object) && dest != typeof(object));
return (source.IsArray || dest.IsArray) && StrictHasReferenceConversionTo(source, dest, true);
}
private static bool StrictHasReferenceConversionTo(this Type source, Type dest, bool skipNonArray)
{
// HasReferenceConversionTo was both too strict and too lax. It was too strict in prohibiting
// some valid conversions involving arrays, and too lax in allowing casts between interfaces
// and sealed classes that don't implement them. Unfortunately fixing the lax cases would be
// a breaking change, especially since such expressions will even work if only given null
// arguments.
// This method catches the cases that were incorrectly disallowed, but when it needs to
// examine possible conversions of element or type parameters it applies stricter rules.
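// Illustrative example (assumed; Foo is a hypothetical non-sealed class): IList<string>[] to
// Foo[] is not caught by the assignability checks in HasReferenceConversionTo, so it falls
// through to the element-wise interface/unsealed-class rules below, which permit it.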
while (true)
{
if (!skipNonArray) // Skip if we just came from HasReferenceConversionTo and have just tested these
{
if (source.IsValueType | dest.IsValueType)
{
return false;
}
// Includes the case of either being typeof(object)
if (source.IsAssignableFrom(dest) || dest.IsAssignableFrom(source))
{
return true;
}
if (source.IsInterface)
{
if (dest.IsInterface || dest.IsClass && !dest.IsSealed)
{
return true;
}
}
else if (dest.IsInterface)
{
if (source.IsClass && !source.IsSealed)
{
return true;
}
}
}
if (source.IsArray)
{
if (dest.IsArray)
{
if (source.GetArrayRank() != dest.GetArrayRank() || source.IsSZArray != dest.IsSZArray)
{
return false;
}
source = source.GetElementType()!;
dest = dest.GetElementType()!;
skipNonArray = false;
}
else
{
return HasArrayToInterfaceConversion(source, dest);
}
}
else if (dest.IsArray)
{
if (HasInterfaceToArrayConversion(source, dest))
{
return true;
}
return IsImplicitReferenceConversion(typeof(Array), source);
}
else
{
return IsLegalExplicitVariantDelegateConversion(source, dest);
}
}
}
private static bool HasArrayToInterfaceConversion(Type source, Type dest)
{
Debug.Assert(source.IsArray);
if (!source.IsSZArray || !dest.IsInterface || !dest.IsGenericType)
{
return false;
}
Type[] destParams = dest.GetGenericArguments();
if (destParams.Length != 1)
{
return false;
}
Type destGen = dest.GetGenericTypeDefinition();
foreach (Type iface in s_arrayAssignableInterfaces)
{
if (AreEquivalent(destGen, iface))
{
return StrictHasReferenceConversionTo(source.GetElementType()!, destParams[0], false);
}
}
return false;
}
private static bool HasInterfaceToArrayConversion(Type source, Type dest)
{
Debug.Assert(dest.IsSZArray);
if (!dest.IsSZArray || !source.IsInterface || !source.IsGenericType)
{
return false;
}
Type[] sourceParams = source.GetGenericArguments();
if (sourceParams.Length != 1)
{
return false;
}
Type sourceGen = source.GetGenericTypeDefinition();
foreach (Type iface in s_arrayAssignableInterfaces)
{
if (AreEquivalent(sourceGen, iface))
{
return StrictHasReferenceConversionTo(sourceParams[0], dest.GetElementType()!, false);
}
}
return false;
}
private static bool IsCovariant(Type t)
{
Debug.Assert(t != null);
return 0 != (t.GenericParameterAttributes & GenericParameterAttributes.Covariant);
}
private static bool IsContravariant(Type t)
{
Debug.Assert(t != null);
return 0 != (t.GenericParameterAttributes & GenericParameterAttributes.Contravariant);
}
private static bool IsInvariant(Type t)
{
Debug.Assert(t != null);
return 0 == (t.GenericParameterAttributes & GenericParameterAttributes.VarianceMask);
}
private static bool IsDelegate(Type t)
{
Debug.Assert(t != null);
return t.IsSubclassOf(typeof(MulticastDelegate));
}
public static bool IsLegalExplicitVariantDelegateConversion(Type source, Type dest)
{
Debug.Assert(source != null && dest != null);
// There *might* be a legal conversion from a generic delegate type S to generic delegate type T,
// provided all of the following are true:
// o Both types are constructed generic types of the same generic delegate type, D<X1,... Xk>.
// That is, S = D<S1...>, T = D<T1...>.
// o If type parameter Xi is declared to be invariant then Si must be identical to Ti.
// o If type parameter Xi is declared to be covariant ("out") then Si must be convertible
// to Ti via an identity conversion, implicit reference conversion, or explicit reference conversion.
// o If type parameter Xi is declared to be contravariant ("in") then either Si must be identical to Ti,
// or Si and Ti must both be reference types.
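// Illustrative examples (assumed): Func<object> -> Func<string> is legal here because the type
// parameter of Func<out TResult> is covariant and object -> string is an explicit reference
// conversion, whereas Action<int> -> Action<long> is rejected because the contravariant
// parameter is instantiated with value types.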
if (!IsDelegate(source) || !IsDelegate(dest) || !source.IsGenericType || !dest.IsGenericType)
{
return false;
}
Type genericDelegate = source.GetGenericTypeDefinition();
if (dest.GetGenericTypeDefinition() != genericDelegate)
{
return false;
}
Type[] genericParameters = genericDelegate.GetGenericArguments();
Type[] sourceArguments = source.GetGenericArguments();
Type[] destArguments = dest.GetGenericArguments();
Debug.Assert(genericParameters != null);
Debug.Assert(sourceArguments != null);
Debug.Assert(destArguments != null);
Debug.Assert(genericParameters.Length == sourceArguments.Length);
Debug.Assert(genericParameters.Length == destArguments.Length);
for (int iParam = 0; iParam < genericParameters.Length; ++iParam)
{
Type sourceArgument = sourceArguments[iParam];
Type destArgument = destArguments[iParam];
Debug.Assert(sourceArgument != null && destArgument != null);
// If the arguments are identical then this one is automatically good, so skip it.
if (AreEquivalent(sourceArgument, destArgument))
{
continue;
}
Type genericParameter = genericParameters[iParam];
Debug.Assert(genericParameter != null);
if (IsInvariant(genericParameter))
{
return false;
}
if (IsCovariant(genericParameter))
{
if (!sourceArgument.HasReferenceConversionTo(destArgument))
{
return false;
}
}
else if (IsContravariant(genericParameter) && (sourceArgument.IsValueType || destArgument.IsValueType))
{
return false;
}
}
return true;
}
public static bool IsConvertible(this Type type)
{
type = GetNonNullableType(type);
if (type.IsEnum)
{
return true;
}
switch (type.GetTypeCode())
{
case TypeCode.Boolean:
case TypeCode.Byte:
case TypeCode.SByte:
case TypeCode.Int16:
case TypeCode.Int32:
case TypeCode.Int64:
case TypeCode.UInt16:
case TypeCode.UInt32:
case TypeCode.UInt64:
case TypeCode.Single:
case TypeCode.Double:
case TypeCode.Char:
return true;
default:
return false;
}
}
public static bool HasReferenceEquality(Type left, Type right)
{
if (left.IsValueType || right.IsValueType)
{
return false;
}
// If we have an interface and a reference type then we can do
// reference equality.
// If we have two reference types and one is assignable to the
// other then we can do reference equality.
return left.IsInterface || right.IsInterface || AreReferenceAssignable(left, right)
|| AreReferenceAssignable(right, left);
}
public static bool HasBuiltInEqualityOperator(Type left, Type right)
{
// If we have an interface and a reference type then we can do
// reference equality.
if (left.IsInterface && !right.IsValueType)
{
return true;
}
if (right.IsInterface && !left.IsValueType)
{
return true;
}
// If we have two reference types and one is assignable to the
// other then we can do reference equality.
if (!left.IsValueType && !right.IsValueType)
{
if (AreReferenceAssignable(left, right) || AreReferenceAssignable(right, left))
{
return true;
}
}
// Otherwise, if the types are not the same then we definitely
// do not have a built-in equality operator.
if (!AreEquivalent(left, right))
{
return false;
}
// We have two identical value types, modulo nullability. (If they were both the
// same reference type then we would have returned true earlier.)
Debug.Assert(left.IsValueType);
// Equality between struct types is only defined for numerics, bools, enums,
// and their nullable equivalents.
Type nnType = GetNonNullableType(left);
return nnType == typeof(bool) || IsNumeric(nnType) || nnType.IsEnum;
}
public static bool IsImplicitlyConvertibleTo(this Type source, Type destination) =>
AreEquivalent(source, destination) // identity conversion
|| IsImplicitNumericConversion(source, destination)
|| IsImplicitReferenceConversion(source, destination)
|| IsImplicitBoxingConversion(source, destination)
|| IsImplicitNullableConversion(source, destination);
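// Illustrative examples (assumed): int -> long (implicit numeric), string -> object (implicit
// reference), int -> object (boxing), and int -> int? (nullable) all qualify, while long -> int
// does not, since narrowing is not an implicit numeric conversion.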
[UnconditionalSuppressMessage("ReflectionAnalysis", "IL2075:UnrecognizedReflectionPattern",
Justification = "The trimmer doesn't remove operators when System.Linq.Expressions is used. See https://github.com/mono/linker/pull/2125.")]
public static MethodInfo? GetUserDefinedCoercionMethod(Type convertFrom, Type convertToType)
{
Type nnExprType = GetNonNullableType(convertFrom);
Type nnConvType = GetNonNullableType(convertToType);
// try exact match on types
MethodInfo[] eMethods = nnExprType.GetMethods(BindingFlags.Static | BindingFlags.Public | BindingFlags.NonPublic);
MethodInfo? method = FindConversionOperator(eMethods, convertFrom, convertToType);
if (method != null)
{
return method;
}
MethodInfo[] cMethods = nnConvType.GetMethods(BindingFlags.Static | BindingFlags.Public | BindingFlags.NonPublic);
method = FindConversionOperator(cMethods, convertFrom, convertToType);
if (method != null)
{
return method;
}
if (AreEquivalent(nnExprType, convertFrom) && AreEquivalent(nnConvType, convertToType))
{
return null;
}
// try lifted conversion
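// Illustrative example (assumed; MyType is hypothetical): given a user-defined implicit
// operator from MyType to int, converting MyType? to int? finds no exact match above, so the
// lookups below retry with the underlying non-nullable types and locate the operator.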
return FindConversionOperator(eMethods, nnExprType, nnConvType)
?? FindConversionOperator(cMethods, nnExprType, nnConvType)
?? FindConversionOperator(eMethods, nnExprType, convertToType)
?? FindConversionOperator(cMethods, nnExprType, convertToType);
}
private static MethodInfo? FindConversionOperator(MethodInfo[] methods, Type? typeFrom, Type? typeTo)
{
foreach (MethodInfo mi in methods)
{
if ((mi.Name == "op_Implicit" || mi.Name == "op_Explicit") && AreEquivalent(mi.ReturnType, typeTo))
{
ParameterInfo[] pis = mi.GetParametersCached();
if (pis.Length == 1 && AreEquivalent(pis[0].ParameterType, typeFrom))
{
return mi;
}
}
}
return null;
}
private static bool IsImplicitNumericConversion(Type source, Type destination)
{
TypeCode tcSource = source.GetTypeCode();
TypeCode tcDest = destination.GetTypeCode();
switch (tcSource)
{
case TypeCode.SByte:
switch (tcDest)
{
case TypeCode.Int16:
case TypeCode.Int32:
case TypeCode.Int64:
case TypeCode.Single:
case TypeCode.Double:
case TypeCode.Decimal:
return true;
}
break;
case TypeCode.Byte:
switch (tcDest)
{
case TypeCode.Int16:
case TypeCode.UInt16:
case TypeCode.Int32:
case TypeCode.UInt32:
case TypeCode.Int64:
case TypeCode.UInt64:
case TypeCode.Single:
case TypeCode.Double:
case TypeCode.Decimal:
return true;
}
break;
case TypeCode.Int16:
switch (tcDest)
{
case TypeCode.Int32:
case TypeCode.Int64:
case TypeCode.Single:
case TypeCode.Double:
case TypeCode.Decimal:
return true;
}
break;
case TypeCode.UInt16:
switch (tcDest)
{
case TypeCode.Int32:
case TypeCode.UInt32:
case TypeCode.Int64:
case TypeCode.UInt64:
case TypeCode.Single:
case TypeCode.Double:
case TypeCode.Decimal:
return true;
}
break;
case TypeCode.Int32:
switch (tcDest)
{
case TypeCode.Int64:
case TypeCode.Single:
case TypeCode.Double:
case TypeCode.Decimal:
return true;
}
break;
case TypeCode.UInt32:
switch (tcDest)
{
case TypeCode.Int64:
case TypeCode.UInt64:
case TypeCode.Single:
case TypeCode.Double:
case TypeCode.Decimal:
return true;
}
break;
case TypeCode.Int64:
case TypeCode.UInt64:
switch (tcDest)
{
case TypeCode.Single:
case TypeCode.Double:
case TypeCode.Decimal:
return true;
}
break;
case TypeCode.Char:
switch (tcDest)
{
case TypeCode.UInt16:
case TypeCode.Int32:
case TypeCode.UInt32:
case TypeCode.Int64:
case TypeCode.UInt64:
case TypeCode.Single:
case TypeCode.Double:
case TypeCode.Decimal:
return true;
}
break;
case TypeCode.Single:
return tcDest == TypeCode.Double;
}
return false;
}
private static bool IsImplicitReferenceConversion(Type source, Type destination) =>
destination.IsAssignableFrom(source);
private static bool IsImplicitBoxingConversion(Type source, Type destination) =>
source.IsValueType && (destination == typeof(object) || destination == typeof(ValueType)) || source.IsEnum && destination == typeof(Enum);
private static bool IsImplicitNullableConversion(Type source, Type destination) =>
IsNullableType(destination) && IsImplicitlyConvertibleTo(GetNonNullableType(source), GetNonNullableType(destination));
public static Type? FindGenericType(Type definition, Type? type)
{
// For now this helper doesn't support interfaces
Debug.Assert(!definition.IsInterface);
while (type is not null && type != typeof(object))
{
if (type.IsConstructedGenericType && AreEquivalent(type.GetGenericTypeDefinition(), definition))
{
return type;
}
type = type.BaseType;
}
return null;
}
/// <summary>
/// Searches for an operator method on the type. The method must have
/// the specified signature, no generic arguments, and have the
/// SpecialName bit set. Also searches inherited operator methods.
///
/// NOTE: This was designed to satisfy the needs of op_True and
/// op_False, because we have to do runtime lookup for those. It may
/// not work right for unary operators in general.
/// </summary>
[UnconditionalSuppressMessage("ReflectionAnalysis", "IL2067:UnrecognizedReflectionPattern",
Justification = "The trimmer doesn't remove operators when System.Linq.Expressions is used. See https://github.com/mono/linker/pull/2125.")]
public static MethodInfo? GetBooleanOperator(Type type, string name)
{
Debug.Assert(name == "op_False" || name == "op_True");
do
{
MethodInfo? result = type.GetAnyStaticMethodValidated(name, new[] { type });
if (result != null && result.IsSpecialName && !result.ContainsGenericParameters)
{
return result;
}
type = type.BaseType!;
} while (type != null);
return null;
}
public static Type GetNonRefType(this Type type) => type.IsByRef ? type.GetElementType()! : type;
public static bool AreEquivalent(Type? t1, Type? t2) => t1 != null && t1.IsEquivalentTo(t2);
public static bool AreReferenceAssignable(Type dest, Type src)
{
// This actually implements "Is this identity assignable and/or reference assignable?"
if (AreEquivalent(dest, src))
{
return true;
}
return !dest.IsValueType && !src.IsValueType && dest.IsAssignableFrom(src);
}
public static bool IsSameOrSubclass(Type type, Type subType) =>
AreEquivalent(type, subType) || subType.IsSubclassOf(type);
public static void ValidateType(Type type, string? paramName) => ValidateType(type, paramName, false, false);
public static void ValidateType(Type type, string? paramName, bool allowByRef, bool allowPointer)
{
if (ValidateType(type, paramName, -1))
{
if (!allowByRef && type.IsByRef)
{
throw Error.TypeMustNotBeByRef(paramName);
}
if (!allowPointer && type.IsPointer)
{
throw Error.TypeMustNotBePointer(paramName);
}
}
}
public static bool ValidateType(Type type, string? paramName, int index)
{
if (type == typeof(void))
{
return false; // Caller can skip further checks.
}
if (type.ContainsGenericParameters)
{
throw type.IsGenericTypeDefinition
? Error.TypeIsGeneric(type, paramName, index)
: Error.TypeContainsGenericParameters(type, paramName, index);
}
return true;
}
[UnconditionalSuppressMessage("ReflectionAnalysis", "IL2070:UnrecognizedReflectionPattern",
Justification = "The trimmer will never remove the Invoke method from delegates.")]
public static MethodInfo GetInvokeMethod(this Type delegateType)
{
Debug.Assert(typeof(Delegate).IsAssignableFrom(delegateType));
return delegateType.GetMethod("Invoke", BindingFlags.Instance | BindingFlags.Public | BindingFlags.NonPublic)!;
}
internal static bool IsUnsigned(this Type type) => IsUnsigned(GetNonNullableType(type).GetTypeCode());
internal static bool IsUnsigned(this TypeCode typeCode)
{
switch (typeCode)
{
case TypeCode.Byte:
case TypeCode.UInt16:
case TypeCode.Char:
case TypeCode.UInt32:
case TypeCode.UInt64:
return true;
default:
return false;
}
}
internal static bool IsFloatingPoint(this Type type) => IsFloatingPoint(GetNonNullableType(type).GetTypeCode());
internal static bool IsFloatingPoint(this TypeCode typeCode)
{
switch (typeCode)
{
case TypeCode.Single:
case TypeCode.Double:
return true;
default:
return false;
}
}
[UnconditionalSuppressMessage("ReflectionAnalysis", "IL2070:UnrecognizedReflectionPattern",
Justification = "The Array 'Get' method is dynamically constructed and is not included in IL. It is not subject to trimming.")]
public static MethodInfo GetArrayGetMethod(Type arrayType)
{
Debug.Assert(arrayType.IsArray);
return arrayType.GetMethod("Get", BindingFlags.Public | BindingFlags.Instance)!;
}
[UnconditionalSuppressMessage("ReflectionAnalysis", "IL2070:UnrecognizedReflectionPattern",
Justification = "The Array 'Set' method is dynamically constructed and is not included in IL. It is not subject to trimming.")]
public static MethodInfo GetArraySetMethod(Type arrayType)
{
Debug.Assert(arrayType.IsArray);
return arrayType.GetMethod("Set", BindingFlags.Public | BindingFlags.Instance)!;
}
[UnconditionalSuppressMessage("ReflectionAnalysis", "IL2070:UnrecognizedReflectionPattern",
Justification = "The Array 'Address' method is dynamically constructed and is not included in IL. It is not subject to trimming.")]
public static MethodInfo GetArrayAddressMethod(Type arrayType)
{
Debug.Assert(arrayType.IsArray);
return arrayType.GetMethod("Address", BindingFlags.Public | BindingFlags.Instance)!;
}
}
}
| -1 |
dotnet/runtime | 66,248 | Fix issues related to JsonSerializerOptions mutation and caching. | Second attempt at merging https://github.com/dotnet/runtime/pull/65863. Original PR introduced test failures in netfx and was reverted in https://github.com/dotnet/runtime/pull/66235.
cc @jkotas | eiriktsarpalis | 2022-03-05T19:59:48Z | 2022-03-07T19:44:14Z | 44ec3c9474b3a93eb4f71791a43d8bb5c0b08a58 | 8b4eaf94140f488743d0c78caa3afec3b9c5d789 | Fix issues related to JsonSerializerOptions mutation and caching.. Second attempt at merging https://github.com/dotnet/runtime/pull/65863. Original PR introduced test failures in netfx and was reverted in https://github.com/dotnet/runtime/pull/66235.
cc @jkotas | ./src/libraries/System.Text.Json/tests/System.Text.Json.Tests/TestCaseType.cs | // Licensed to the .NET Foundation under one or more agreements.
// The .NET Foundation licenses this file to you under the MIT license.
namespace System.Text.Json.Tests
{
public enum TestCaseType
{
HelloWorld,
Basic,
BasicLargeNum,
SpecialNumForm,
SpecialStrings,
ProjectLockJson,
FullSchema1,
FullSchema2,
DeepTree,
BroadTree,
LotsOfNumbers,
LotsOfStrings,
Json400B,
Json4KB,
Json40KB,
Json400KB,
}
}
| // Licensed to the .NET Foundation under one or more agreements.
// The .NET Foundation licenses this file to you under the MIT license.
namespace System.Text.Json.Tests
{
public enum TestCaseType
{
HelloWorld,
Basic,
BasicLargeNum,
SpecialNumForm,
SpecialStrings,
ProjectLockJson,
FullSchema1,
FullSchema2,
DeepTree,
BroadTree,
LotsOfNumbers,
LotsOfStrings,
Json400B,
Json4KB,
Json40KB,
Json400KB,
}
}
| -1 |
dotnet/runtime | 66,248 | Fix issues related to JsonSerializerOptions mutation and caching. | Second attempt at merging https://github.com/dotnet/runtime/pull/65863. Original PR introduced test failures in netfx and was reverted in https://github.com/dotnet/runtime/pull/66235.
cc @jkotas | eiriktsarpalis | 2022-03-05T19:59:48Z | 2022-03-07T19:44:14Z | 44ec3c9474b3a93eb4f71791a43d8bb5c0b08a58 | 8b4eaf94140f488743d0c78caa3afec3b9c5d789 | Fix issues related to JsonSerializerOptions mutation and caching.. Second attempt at merging https://github.com/dotnet/runtime/pull/65863. Original PR introduced test failures in netfx and was reverted in https://github.com/dotnet/runtime/pull/66235.
cc @jkotas | ./src/libraries/System.Private.CoreLib/src/System/IAsyncResult.cs | // Licensed to the .NET Foundation under one or more agreements.
// The .NET Foundation licenses this file to you under the MIT license.
/*============================================================
**
** Interface: IAsyncResult
**
** Purpose: Interface to encapsulate the results of an async
** operation
**
===========================================================*/
using System.Threading;
namespace System
{
public interface IAsyncResult
{
bool IsCompleted { get; }
WaitHandle AsyncWaitHandle { get; }
object? AsyncState { get; }
bool CompletedSynchronously { get; }
}
}
| // Licensed to the .NET Foundation under one or more agreements.
// The .NET Foundation licenses this file to you under the MIT license.
/*============================================================
**
** Interface: IAsyncResult
**
** Purpose: Interface to encapsulate the results of an async
** operation
**
===========================================================*/
using System.Threading;
namespace System
{
public interface IAsyncResult
{
bool IsCompleted { get; }
WaitHandle AsyncWaitHandle { get; }
object? AsyncState { get; }
bool CompletedSynchronously { get; }
}
}
| -1 |
dotnet/runtime | 66,248 | Fix issues related to JsonSerializerOptions mutation and caching. | Second attempt at merging https://github.com/dotnet/runtime/pull/65863. Original PR introduced test failures in netfx and was reverted in https://github.com/dotnet/runtime/pull/66235.
cc @jkotas | eiriktsarpalis | 2022-03-05T19:59:48Z | 2022-03-07T19:44:14Z | 44ec3c9474b3a93eb4f71791a43d8bb5c0b08a58 | 8b4eaf94140f488743d0c78caa3afec3b9c5d789 | Fix issues related to JsonSerializerOptions mutation and caching.. Second attempt at merging https://github.com/dotnet/runtime/pull/65863. Original PR introduced test failures in netfx and was reverted in https://github.com/dotnet/runtime/pull/66235.
cc @jkotas | ./src/libraries/System.IO.Ports/tests/SerialStream/Write_byte_int_int.cs | // Licensed to the .NET Foundation under one or more agreements.
// The .NET Foundation licenses this file to you under the MIT license.
using System.Diagnostics;
using System.IO.PortsTests;
using System.Text;
using System.Threading;
using System.Threading.Tasks;
using Legacy.Support;
using Xunit;
namespace System.IO.Ports.Tests
{
public class SerialStream_Write_byte_int_int : PortsTest
{
// The buffer size used for large byte array testing
private const int LARGE_BUFFER_SIZE = 2048;
// When we test Write and do not care about actually writing anything, we must still
// create a byte array to pass into the method; the following is the size of the
// byte array used in this situation
private const int DEFAULT_BUFFER_SIZE = 1;
private const int DEFAULT_BUFFER_OFFSET = 0;
private const int DEFAULT_BUFFER_COUNT = 1;
// The maximum buffer size when an exception occurs
private const int MAX_BUFFER_SIZE_FOR_EXCEPTION = 255;
// The maximum buffer size when an exception is not expected
private const int MAX_BUFFER_SIZE = 8;
// The default number of times the write method is called when verifying write
private const int DEFAULT_NUM_WRITES = 3;
#region Test Cases
[ConditionalFact(nameof(HasOneSerialPort))]
public void Buffer_Null()
{
VerifyWriteException(null, 0, 1, typeof(ArgumentNullException));
}
[ConditionalFact(nameof(HasOneSerialPort))]
public void Offset_NEG1()
{
VerifyWriteException(new byte[DEFAULT_BUFFER_SIZE], -1, DEFAULT_BUFFER_COUNT, typeof(ArgumentOutOfRangeException));
}
[ConditionalFact(nameof(HasOneSerialPort))]
public void Offset_NEGRND()
{
var rndGen = new Random(-55);
VerifyWriteException(new byte[DEFAULT_BUFFER_SIZE], rndGen.Next(int.MinValue, 0), DEFAULT_BUFFER_COUNT, typeof(ArgumentOutOfRangeException));
}
[ConditionalFact(nameof(HasOneSerialPort))]
public void Offset_MinInt()
{
VerifyWriteException(new byte[DEFAULT_BUFFER_SIZE], int.MinValue, DEFAULT_BUFFER_COUNT, typeof(ArgumentOutOfRangeException));
}
[ConditionalFact(nameof(HasOneSerialPort))]
public void Count_NEG1()
{
VerifyWriteException(new byte[DEFAULT_BUFFER_SIZE], DEFAULT_BUFFER_OFFSET, -1, typeof(ArgumentOutOfRangeException));
}
[ConditionalFact(nameof(HasOneSerialPort))]
public void Count_NEGRND()
{
var rndGen = new Random(-55);
VerifyWriteException(new byte[DEFAULT_BUFFER_SIZE], DEFAULT_BUFFER_OFFSET, rndGen.Next(int.MinValue, 0), typeof(ArgumentOutOfRangeException));
}
[ConditionalFact(nameof(HasOneSerialPort))]
public void Count_MinInt()
{
VerifyWriteException(new byte[DEFAULT_BUFFER_SIZE], DEFAULT_BUFFER_OFFSET, int.MinValue, typeof(ArgumentOutOfRangeException));
}
[ConditionalFact(nameof(HasNullModem))]
public void OffsetCount_EQ_Length_Plus_1()
{
var rndGen = new Random(-55);
int bufferLength = rndGen.Next(1, MAX_BUFFER_SIZE_FOR_EXCEPTION);
int offset = rndGen.Next(0, bufferLength);
int count = bufferLength + 1 - offset;
Type expectedException = typeof(ArgumentException);
VerifyWriteException(new byte[bufferLength], offset, count, expectedException);
}
[ConditionalFact(nameof(HasNullModem))]
public void OffsetCount_GT_Length()
{
var rndGen = new Random(-55);
int bufferLength = rndGen.Next(1, MAX_BUFFER_SIZE_FOR_EXCEPTION);
int offset = rndGen.Next(0, bufferLength);
int count = rndGen.Next(bufferLength + 1 - offset, int.MaxValue);
Type expectedException = typeof(ArgumentException);
VerifyWriteException(new byte[bufferLength], offset, count, expectedException);
}
[ConditionalFact(nameof(HasNullModem))]
public void Offset_GT_Length()
{
var rndGen = new Random(-55);
int bufferLength = rndGen.Next(1, MAX_BUFFER_SIZE_FOR_EXCEPTION);
int offset = rndGen.Next(bufferLength, int.MaxValue);
int count = DEFAULT_BUFFER_COUNT;
Type expectedException = typeof(ArgumentException);
VerifyWriteException(new byte[bufferLength], offset, count, expectedException);
}
[ConditionalFact(nameof(HasNullModem))]
public void Count_GT_Length()
{
var rndGen = new Random(-55);
int bufferLength = rndGen.Next(1, MAX_BUFFER_SIZE_FOR_EXCEPTION);
int offset = DEFAULT_BUFFER_OFFSET;
int count = rndGen.Next(bufferLength + 1, int.MaxValue);
Type expectedException = typeof(ArgumentException);
VerifyWriteException(new byte[bufferLength], offset, count, expectedException);
}
[ConditionalFact(nameof(HasNullModem))]
public void OffsetCount_EQ_Length()
{
var rndGen = new Random(-55);
int bufferLength = rndGen.Next(1, MAX_BUFFER_SIZE);
int offset = rndGen.Next(0, bufferLength - 1);
int count = bufferLength - offset;
VerifyWrite(new byte[bufferLength], offset, count);
}
[ConditionalFact(nameof(HasNullModem))]
public void Offset_EQ_Length_Minus_1()
{
var rndGen = new Random(-55);
int bufferLength = rndGen.Next(1, MAX_BUFFER_SIZE);
int offset = bufferLength - 1;
var count = 1;
VerifyWrite(new byte[bufferLength], offset, count);
}
[ConditionalFact(nameof(HasNullModem))]
public void Count_EQ_Length()
{
var rndGen = new Random(-55);
int bufferLength = rndGen.Next(1, MAX_BUFFER_SIZE);
var offset = 0;
int count = bufferLength;
VerifyWrite(new byte[bufferLength], offset, count);
}
[ConditionalFact(nameof(HasNullModem))]
public void ASCIIEncoding()
{
var rndGen = new Random(-55);
int bufferLength = rndGen.Next(1, MAX_BUFFER_SIZE);
int offset = rndGen.Next(0, bufferLength - 1);
int count = rndGen.Next(1, bufferLength - offset);
VerifyWrite(new byte[bufferLength], offset, count, new ASCIIEncoding());
}
[ConditionalFact(nameof(HasNullModem))]
public void UTF8Encoding()
{
var rndGen = new Random(-55);
int bufferLength = rndGen.Next(1, MAX_BUFFER_SIZE);
int offset = rndGen.Next(0, bufferLength - 1);
int count = rndGen.Next(1, bufferLength - offset);
VerifyWrite(new byte[bufferLength], offset, count, new UTF8Encoding());
}
[ConditionalFact(nameof(HasNullModem))]
public void UTF32Encoding()
{
var rndGen = new Random(-55);
int bufferLength = rndGen.Next(1, MAX_BUFFER_SIZE);
int offset = rndGen.Next(0, bufferLength - 1);
int count = rndGen.Next(1, bufferLength - offset);
VerifyWrite(new byte[bufferLength], offset, count, new UTF32Encoding());
}
[ConditionalFact(nameof(HasNullModem))]
public void UnicodeEncoding()
{
var rndGen = new Random(-55);
int bufferLength = rndGen.Next(1, MAX_BUFFER_SIZE);
int offset = rndGen.Next(0, bufferLength - 1);
int count = rndGen.Next(1, bufferLength - offset);
VerifyWrite(new byte[bufferLength], offset, count, new UnicodeEncoding());
}
[ConditionalFact(nameof(HasNullModem))]
public void LargeBuffer()
{
int bufferLength = LARGE_BUFFER_SIZE;
var offset = 0;
int count = bufferLength;
VerifyWrite(new byte[bufferLength], offset, count, 1);
}
[ConditionalFact(nameof(HasOneSerialPort))]
public void InBreak()
{
using (var com1 = new SerialPort(TCSupport.LocalMachineSerialInfo.FirstAvailablePortName))
{
Debug.WriteLine("Verifying Write throws InvalidOperationException while in a Break");
com1.Open();
com1.BreakState = true;
Assert.Throws<InvalidOperationException>(() => com1.BaseStream.Write(new byte[8], 0, 8));
}
}
[ConditionalFact(nameof(HasOneSerialPort), nameof(HasHardwareFlowControl))]
public void Count_EQ_Zero()
{
using (var com1 = new SerialPort(TCSupport.LocalMachineSerialInfo.FirstAvailablePortName))
{
Debug.WriteLine("Verifying Write with count=0 returns immediately");
com1.Open();
com1.Handshake = Handshake.RequestToSend;
com1.BaseStream.Write(new byte[8], 0, 0);
}
}
[ConditionalFact(nameof(HasLoopbackOrNullModem))]
public void WriteBreakSequenceDoesNotCorruptData()
{
using (SerialPort com1 = TCSupport.InitFirstSerialPort())
using (SerialPort com2 = TCSupport.InitSecondSerialPort(com1))
{
com1.Open();
if (!com2.IsOpen) // This is necessary since com1 and com2 might be the same port if we are using a loopback
com2.Open();
byte[] msg = new byte[] { 0x1B, 0x40, 0x48, 0x65, 0x6c, 0x6c, 0x6f, 0x20, 0x77, 0x6f, 0x72, 0x6c, 0x64 };
Task writingTask = Task.Run(() => {
com1.BaseStream.Write(msg, 0, msg.Length);
com1.BaseStream.Flush();
});
byte[] bytes = new byte[msg.Length];
int totalBytesRead = 0;
while (totalBytesRead < bytes.Length)
{
int bytesRead = com2.BaseStream.Read(bytes, totalBytesRead, bytes.Length - totalBytesRead);
totalBytesRead += bytesRead;
}
writingTask.Wait();
Assert.Equal(msg, bytes);
}
}
#endregion
#region Verification for Test Cases
private void VerifyWriteException(byte[] buffer, int offset, int count, Type expectedException)
{
using (SerialPort com = new SerialPort(TCSupport.LocalMachineSerialInfo.FirstAvailablePortName))
{
int bufferLength = null == buffer ? 0 : buffer.Length;
Debug.WriteLine("Verifying write method throws {0} buffer.Lenght={1}, offset={2}, count={3}",
expectedException, bufferLength, offset, count);
com.Open();
Assert.Throws(expectedException, () => com.BaseStream.Write(buffer, offset, count));
}
}
private void VerifyWrite(byte[] buffer, int offset, int count)
{
VerifyWrite(buffer, offset, count, new ASCIIEncoding());
}
private void VerifyWrite(byte[] buffer, int offset, int count, int numWrites)
{
VerifyWrite(buffer, offset, count, new ASCIIEncoding(), numWrites);
}
private void VerifyWrite(byte[] buffer, int offset, int count, Encoding encoding)
{
VerifyWrite(buffer, offset, count, encoding, DEFAULT_NUM_WRITES);
}
private void VerifyWrite(byte[] buffer, int offset, int count, Encoding encoding, int numWrites)
{
using (var com1 = new SerialPort(TCSupport.LocalMachineSerialInfo.FirstAvailablePortName))
using (var com2 = new SerialPort(TCSupport.LocalMachineSerialInfo.SecondAvailablePortName))
{
var rndGen = new Random(-55);
Debug.WriteLine("Verifying write method buffer.Lenght={0}, offset={1}, count={2}, endocing={3}",
buffer.Length, offset, count, encoding.EncodingName);
com1.Encoding = encoding;
com2.Encoding = encoding;
com1.Open();
com2.Open();
for (var i = 0; i < buffer.Length; i++)
{
buffer[i] = (byte)rndGen.Next(0, 256);
}
VerifyWriteByteArray(buffer, offset, count, com1, com2, numWrites);
}
}
private void VerifyWriteByteArray(byte[] buffer, int offset, int count, SerialPort com1, SerialPort com2, int numWrites)
{
var index = 0;
var oldBuffer = (byte[])buffer.Clone();
var expectedBytes = new byte[count];
var actualBytes = new byte[expectedBytes.Length * numWrites];
for (var i = 0; i < count; i++)
{
expectedBytes[i] = buffer[i + offset];
}
for (var i = 0; i < numWrites; i++)
{
com1.BaseStream.Write(buffer, offset, count);
}
com2.ReadTimeout = 500;
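// Note: the 10.0 factor below presumably models ~10 bits per byte on the wire (8 data bits
// plus start and stop bits), so the sleep approximates the transmission time for all writes
// plus a 250 ms cushion.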
Thread.Sleep((int)(((expectedBytes.Length * numWrites * 10.0) / com1.BaudRate) * 1000) + 250);
// Make sure buffer was not altered during the write call
for (var i = 0; i < buffer.Length; i++)
{
if (buffer[i] != oldBuffer[i])
{
Fail("ERROR!!!: The contents of the buffer were changed from {0} to {1} at {2}", oldBuffer[i], buffer[i], i);
}
}
while (true)
{
int byteRead;
try
{
byteRead = com2.ReadByte();
}
catch (TimeoutException)
{
break;
}
if (actualBytes.Length <= index)
{
// If we have read in more bytes than we expect
Fail("ERROR!!!: We have received more bytes than were sent");
break;
}
actualBytes[index] = (byte)byteRead;
index++;
if (actualBytes.Length - index != com2.BytesToRead)
{
Fail("ERROR!!!: Expected BytesToRead={0} actual={1}", actualBytes.Length - index, com2.BytesToRead);
}
}
// Compare the bytes that were read with the ones we expected to read
for (var j = 0; j < numWrites; j++)
{
for (var i = 0; i < expectedBytes.Length; i++)
{
if (expectedBytes[i] != actualBytes[i + expectedBytes.Length * j])
{
Fail("ERROR!!!: Expected to read byte {0} actual read {1} at {2}", (int)expectedBytes[i], (int)actualBytes[i + expectedBytes.Length * j], i);
}
}
}
if (com1.IsOpen)
com1.Close();
if (com2.IsOpen)
com2.Close();
}
#endregion
}
}
| // Licensed to the .NET Foundation under one or more agreements.
// The .NET Foundation licenses this file to you under the MIT license.
using System.Diagnostics;
using System.IO.PortsTests;
using System.Text;
using System.Threading;
using System.Threading.Tasks;
using Legacy.Support;
using Xunit;
namespace System.IO.Ports.Tests
{
public class SerialStream_Write_byte_int_int : PortsTest
{
// The buffer size used for large byte array testing
private const int LARGE_BUFFER_SIZE = 2048;
// When we test Write and do not care about actually writing anything, we must still
// create a byte array to pass into the method; the following is the size of the
// byte array used in this situation
private const int DEFAULT_BUFFER_SIZE = 1;
private const int DEFAULT_BUFFER_OFFSET = 0;
private const int DEFAULT_BUFFER_COUNT = 1;
// The maximum buffer size when an exception occurs
private const int MAX_BUFFER_SIZE_FOR_EXCEPTION = 255;
// The maximum buffer size when an exception is not expected
private const int MAX_BUFFER_SIZE = 8;
// The default number of times the write method is called when verifying write
private const int DEFAULT_NUM_WRITES = 3;
#region Test Cases
[ConditionalFact(nameof(HasOneSerialPort))]
public void Buffer_Null()
{
VerifyWriteException(null, 0, 1, typeof(ArgumentNullException));
}
[ConditionalFact(nameof(HasOneSerialPort))]
public void Offset_NEG1()
{
VerifyWriteException(new byte[DEFAULT_BUFFER_SIZE], -1, DEFAULT_BUFFER_COUNT, typeof(ArgumentOutOfRangeException));
}
[ConditionalFact(nameof(HasOneSerialPort))]
public void Offset_NEGRND()
{
var rndGen = new Random(-55);
VerifyWriteException(new byte[DEFAULT_BUFFER_SIZE], rndGen.Next(int.MinValue, 0), DEFAULT_BUFFER_COUNT, typeof(ArgumentOutOfRangeException));
}
[ConditionalFact(nameof(HasOneSerialPort))]
public void Offset_MinInt()
{
VerifyWriteException(new byte[DEFAULT_BUFFER_SIZE], int.MinValue, DEFAULT_BUFFER_COUNT, typeof(ArgumentOutOfRangeException));
}
[ConditionalFact(nameof(HasOneSerialPort))]
public void Count_NEG1()
{
VerifyWriteException(new byte[DEFAULT_BUFFER_SIZE], DEFAULT_BUFFER_OFFSET, -1, typeof(ArgumentOutOfRangeException));
}
[ConditionalFact(nameof(HasOneSerialPort))]
public void Count_NEGRND()
{
var rndGen = new Random(-55);
VerifyWriteException(new byte[DEFAULT_BUFFER_SIZE], DEFAULT_BUFFER_OFFSET, rndGen.Next(int.MinValue, 0), typeof(ArgumentOutOfRangeException));
}
[ConditionalFact(nameof(HasOneSerialPort))]
public void Count_MinInt()
{
VerifyWriteException(new byte[DEFAULT_BUFFER_SIZE], DEFAULT_BUFFER_OFFSET, int.MinValue, typeof(ArgumentOutOfRangeException));
}
[ConditionalFact(nameof(HasNullModem))]
public void OffsetCount_EQ_Length_Plus_1()
{
var rndGen = new Random(-55);
int bufferLength = rndGen.Next(1, MAX_BUFFER_SIZE_FOR_EXCEPTION);
int offset = rndGen.Next(0, bufferLength);
int count = bufferLength + 1 - offset;
Type expectedException = typeof(ArgumentException);
VerifyWriteException(new byte[bufferLength], offset, count, expectedException);
}
[ConditionalFact(nameof(HasNullModem))]
public void OffsetCount_GT_Length()
{
var rndGen = new Random(-55);
int bufferLength = rndGen.Next(1, MAX_BUFFER_SIZE_FOR_EXCEPTION);
int offset = rndGen.Next(0, bufferLength);
int count = rndGen.Next(bufferLength + 1 - offset, int.MaxValue);
Type expectedException = typeof(ArgumentException);
VerifyWriteException(new byte[bufferLength], offset, count, expectedException);
}
[ConditionalFact(nameof(HasNullModem))]
public void Offset_GT_Length()
{
var rndGen = new Random(-55);
int bufferLength = rndGen.Next(1, MAX_BUFFER_SIZE_FOR_EXCEPTION);
int offset = rndGen.Next(bufferLength, int.MaxValue);
int count = DEFAULT_BUFFER_COUNT;
Type expectedException = typeof(ArgumentException);
VerifyWriteException(new byte[bufferLength], offset, count, expectedException);
}
[ConditionalFact(nameof(HasNullModem))]
public void Count_GT_Length()
{
var rndGen = new Random(-55);
int bufferLength = rndGen.Next(1, MAX_BUFFER_SIZE_FOR_EXCEPTION);
int offset = DEFAULT_BUFFER_OFFSET;
int count = rndGen.Next(bufferLength + 1, int.MaxValue);
Type expectedException = typeof(ArgumentException);
VerifyWriteException(new byte[bufferLength], offset, count, expectedException);
}
[ConditionalFact(nameof(HasNullModem))]
public void OffsetCount_EQ_Length()
{
var rndGen = new Random(-55);
int bufferLength = rndGen.Next(1, MAX_BUFFER_SIZE);
int offset = rndGen.Next(0, bufferLength - 1);
int count = bufferLength - offset;
VerifyWrite(new byte[bufferLength], offset, count);
}
[ConditionalFact(nameof(HasNullModem))]
public void Offset_EQ_Length_Minus_1()
{
var rndGen = new Random(-55);
int bufferLength = rndGen.Next(1, MAX_BUFFER_SIZE);
int offset = bufferLength - 1;
var count = 1;
VerifyWrite(new byte[bufferLength], offset, count);
}
[ConditionalFact(nameof(HasNullModem))]
public void Count_EQ_Length()
{
var rndGen = new Random(-55);
int bufferLength = rndGen.Next(1, MAX_BUFFER_SIZE);
var offset = 0;
int count = bufferLength;
VerifyWrite(new byte[bufferLength], offset, count);
}
[ConditionalFact(nameof(HasNullModem))]
public void ASCIIEncoding()
{
var rndGen = new Random(-55);
int bufferLength = rndGen.Next(1, MAX_BUFFER_SIZE);
int offset = rndGen.Next(0, bufferLength - 1);
int count = rndGen.Next(1, bufferLength - offset);
VerifyWrite(new byte[bufferLength], offset, count, new ASCIIEncoding());
}
[ConditionalFact(nameof(HasNullModem))]
public void UTF8Encoding()
{
var rndGen = new Random(-55);
int bufferLength = rndGen.Next(1, MAX_BUFFER_SIZE);
int offset = rndGen.Next(0, bufferLength - 1);
int count = rndGen.Next(1, bufferLength - offset);
VerifyWrite(new byte[bufferLength], offset, count, new UTF8Encoding());
}
[ConditionalFact(nameof(HasNullModem))]
public void UTF32Encoding()
{
var rndGen = new Random(-55);
int bufferLength = rndGen.Next(1, MAX_BUFFER_SIZE);
int offset = rndGen.Next(0, bufferLength - 1);
int count = rndGen.Next(1, bufferLength - offset);
VerifyWrite(new byte[bufferLength], offset, count, new UTF32Encoding());
}
[ConditionalFact(nameof(HasNullModem))]
public void UnicodeEncoding()
{
var rndGen = new Random(-55);
int bufferLength = rndGen.Next(1, MAX_BUFFER_SIZE);
int offset = rndGen.Next(0, bufferLength - 1);
int count = rndGen.Next(1, bufferLength - offset);
VerifyWrite(new byte[bufferLength], offset, count, new UnicodeEncoding());
}
[ConditionalFact(nameof(HasNullModem))]
public void LargeBuffer()
{
int bufferLength = LARGE_BUFFER_SIZE;
var offset = 0;
int count = bufferLength;
VerifyWrite(new byte[bufferLength], offset, count, 1);
}
[ConditionalFact(nameof(HasOneSerialPort))]
public void InBreak()
{
using (var com1 = new SerialPort(TCSupport.LocalMachineSerialInfo.FirstAvailablePortName))
{
Debug.WriteLine("Verifying Write throws InvalidOperationException while in a Break");
com1.Open();
com1.BreakState = true;
Assert.Throws<InvalidOperationException>(() => com1.BaseStream.Write(new byte[8], 0, 8));
}
}
[ConditionalFact(nameof(HasOneSerialPort), nameof(HasHardwareFlowControl))]
public void Count_EQ_Zero()
{
using (var com1 = new SerialPort(TCSupport.LocalMachineSerialInfo.FirstAvailablePortName))
{
Debug.WriteLine("Verifying Write with count=0 returns immediately");
com1.Open();
com1.Handshake = Handshake.RequestToSend;
com1.BaseStream.Write(new byte[8], 0, 0);
}
}
[ConditionalFact(nameof(HasLoopbackOrNullModem))]
public void WriteBreakSequenceDoesNotCorruptData()
{
using (SerialPort com1 = TCSupport.InitFirstSerialPort())
using (SerialPort com2 = TCSupport.InitSecondSerialPort(com1))
{
com1.Open();
if (!com2.IsOpen) // This is necessary since com1 and com2 might be the same port if we are using a loopback
com2.Open();
byte[] msg = new byte[] { 0x1B, 0x40, 0x48, 0x65, 0x6c, 0x6c, 0x6f, 0x20, 0x77, 0x6f, 0x72, 0x6c, 0x64 };
Task writingTask = Task.Run(() => {
com1.BaseStream.Write(msg, 0, msg.Length);
com1.BaseStream.Flush();
});
byte[] bytes = new byte[msg.Length];
int totalBytesRead = 0;
while (totalBytesRead < bytes.Length)
{
int bytesRead = com2.BaseStream.Read(bytes, totalBytesRead, bytes.Length - totalBytesRead);
totalBytesRead += bytesRead;
}
writingTask.Wait();
Assert.Equal(msg, bytes);
}
}
#endregion
#region Verification for Test Cases
private void VerifyWriteException(byte[] buffer, int offset, int count, Type expectedException)
{
using (SerialPort com = new SerialPort(TCSupport.LocalMachineSerialInfo.FirstAvailablePortName))
{
int bufferLength = null == buffer ? 0 : buffer.Length;
Debug.WriteLine("Verifying write method throws {0} buffer.Lenght={1}, offset={2}, count={3}",
expectedException, bufferLength, offset, count);
com.Open();
Assert.Throws(expectedException, () => com.BaseStream.Write(buffer, offset, count));
}
}
private void VerifyWrite(byte[] buffer, int offset, int count)
{
VerifyWrite(buffer, offset, count, new ASCIIEncoding());
}
private void VerifyWrite(byte[] buffer, int offset, int count, int numWrites)
{
VerifyWrite(buffer, offset, count, new ASCIIEncoding(), numWrites);
}
private void VerifyWrite(byte[] buffer, int offset, int count, Encoding encoding)
{
VerifyWrite(buffer, offset, count, encoding, DEFAULT_NUM_WRITES);
}
private void VerifyWrite(byte[] buffer, int offset, int count, Encoding encoding, int numWrites)
{
using (var com1 = new SerialPort(TCSupport.LocalMachineSerialInfo.FirstAvailablePortName))
using (var com2 = new SerialPort(TCSupport.LocalMachineSerialInfo.SecondAvailablePortName))
{
var rndGen = new Random(-55);
Debug.WriteLine("Verifying write method buffer.Lenght={0}, offset={1}, count={2}, endocing={3}",
buffer.Length, offset, count, encoding.EncodingName);
com1.Encoding = encoding;
com2.Encoding = encoding;
com1.Open();
com2.Open();
for (var i = 0; i < buffer.Length; i++)
{
buffer[i] = (byte)rndGen.Next(0, 256);
}
VerifyWriteByteArray(buffer, offset, count, com1, com2, numWrites);
}
}
private void VerifyWriteByteArray(byte[] buffer, int offset, int count, SerialPort com1, SerialPort com2, int numWrites)
{
var index = 0;
var oldBuffer = (byte[])buffer.Clone();
var expectedBytes = new byte[count];
var actualBytes = new byte[expectedBytes.Length * numWrites];
for (var i = 0; i < count; i++)
{
expectedBytes[i] = buffer[i + offset];
}
for (var i = 0; i < numWrites; i++)
{
com1.BaseStream.Write(buffer, offset, count);
}
com2.ReadTimeout = 500;
Thread.Sleep((int)(((expectedBytes.Length * numWrites * 10.0) / com1.BaudRate) * 1000) + 250);
// Make sure buffer was not altered during the write call
for (var i = 0; i < buffer.Length; i++)
{
if (buffer[i] != oldBuffer[i])
{
Fail("ERROR!!!: The contents of the buffer were changed from {0} to {1} at {2}", oldBuffer[i], buffer[i], i);
}
}
while (true)
{
int byteRead;
try
{
byteRead = com2.ReadByte();
}
catch (TimeoutException)
{
break;
}
if (actualBytes.Length <= index)
{
// If we have read in more bytes than we expect
Fail("ERROR!!!: We have received more bytes than were sent");
break;
}
actualBytes[index] = (byte)byteRead;
index++;
if (actualBytes.Length - index != com2.BytesToRead)
{
Fail("ERROR!!!: Expected BytesToRead={0} actual={1}", actualBytes.Length - index, com2.BytesToRead);
}
}
// Compare the bytes that were read with the ones we expected to read
for (var j = 0; j < numWrites; j++)
{
for (var i = 0; i < expectedBytes.Length; i++)
{
if (expectedBytes[i] != actualBytes[i + expectedBytes.Length * j])
{
Fail("ERROR!!!: Expected to read byte {0} actual read {1} at {2}", (int)expectedBytes[i], (int)actualBytes[i + expectedBytes.Length * j], i);
}
}
}
if (com1.IsOpen)
com1.Close();
if (com2.IsOpen)
com2.Close();
}
#endregion
}
}
| -1 |
dotnet/runtime | 66,248 | Fix issues related to JsonSerializerOptions mutation and caching. | Second attempt at merging https://github.com/dotnet/runtime/pull/65863. Original PR introduced test failures in netfx and was reverted in https://github.com/dotnet/runtime/pull/66235.
cc @jkotas | eiriktsarpalis | 2022-03-05T19:59:48Z | 2022-03-07T19:44:14Z | 44ec3c9474b3a93eb4f71791a43d8bb5c0b08a58 | 8b4eaf94140f488743d0c78caa3afec3b9c5d789 | Fix issues related to JsonSerializerOptions mutation and caching.. Second attempt at merging https://github.com/dotnet/runtime/pull/65863. Original PR introduced test failures in netfx and was reverted in https://github.com/dotnet/runtime/pull/66235.
cc @jkotas | ./src/tests/Loader/classloader/rmv/il/RMV-2-8-31-two.il | // Licensed to the .NET Foundation under one or more agreements.
// The .NET Foundation licenses this file to you under the MIT license.
.assembly extern System.Console { }
/* RDawson - 11/2/200
Check that interfaces can have static fields
*/
.assembly Bob{}
.assembly extern mscorlib {}
.class interface Foo{
.field public static int32 Bar
}
.class Bar implements Foo{
.method public specialname void .ctor(){
ldarg.0
call instance void [mscorlib]System.Object::.ctor()
ret
}
}
.method public static int32 main(){
.entrypoint
.maxstack 8
newobj instance void Bar::.ctor()
pop
ldstr "PASS - 2.8.31 - Was allowed to define an interface with a static field"
call void [System.Console]System.Console::WriteLine(class [mscorlib]System.String)
ldc.i4 100
ret
}
| // Licensed to the .NET Foundation under one or more agreements.
// The .NET Foundation licenses this file to you under the MIT license.
.assembly extern System.Console { }
/* RDawson - 11/2/200
Check that interfaces can have static fields
*/
.assembly Bob{}
.assembly extern mscorlib {}
.class interface Foo{
.field public static int32 Bar
}
.class Bar implements Foo{
.method public specialname void .ctor(){
ldarg.0
call instance void [mscorlib]System.Object::.ctor()
ret
}
}
.method public static int32 main(){
.entrypoint
.maxstack 8
newobj instance void Bar::.ctor()
pop
ldstr "PASS - 2.8.31 - Was allowed to define an interface with a static field"
call void [System.Console]System.Console::WriteLine(class [mscorlib]System.String)
ldc.i4 100
ret
}
| -1 |
dotnet/runtime | 66,248 | Fix issues related to JsonSerializerOptions mutation and caching. | Second attempt at merging https://github.com/dotnet/runtime/pull/65863. Original PR introduced test failures in netfx and was reverted in https://github.com/dotnet/runtime/pull/66235.
cc @jkotas | eiriktsarpalis | 2022-03-05T19:59:48Z | 2022-03-07T19:44:14Z | 44ec3c9474b3a93eb4f71791a43d8bb5c0b08a58 | 8b4eaf94140f488743d0c78caa3afec3b9c5d789 | Fix issues related to JsonSerializerOptions mutation and caching.. Second attempt at merging https://github.com/dotnet/runtime/pull/65863. Original PR introduced test failures in netfx and was reverted in https://github.com/dotnet/runtime/pull/66235.
cc @jkotas | ./eng/common/init-tools-native.ps1 | <#
.SYNOPSIS
Entry point script for installing native tools
.DESCRIPTION
Reads $RepoRoot\global.json file to determine native assets to install
and executes installers for those tools
.PARAMETER BaseUri
Base file directory or Url from which to acquire tool archives
.PARAMETER InstallDirectory
Directory to install native toolset. This is a command-line override for the default
Install directory precedence order:
- InstallDirectory command-line override
- NETCOREENG_INSTALL_DIRECTORY environment variable
- (default) %USERPROFILE%/.netcoreeng/native
.PARAMETER Clean
Switch specifying not to install anything, but to clean up native asset folders
.PARAMETER Force
Clean and then install tools
.PARAMETER DownloadRetries
Total number of retry attempts
.PARAMETER RetryWaitTimeInSeconds
Wait time between retry attempts in seconds
.PARAMETER GlobalJsonFile
File path to global.json file
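.EXAMPLE
PS> .\init-tools-native.ps1 -GlobalJsonFile C:\repo\global.json -Force
Illustrative invocation only; the global.json path shown is hypothetical.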
.NOTES
#>
[CmdletBinding(PositionalBinding=$false)]
Param (
[string] $BaseUri = 'https://netcorenativeassets.blob.core.windows.net/resource-packages/external',
[string] $InstallDirectory,
[switch] $Clean = $False,
[switch] $Force = $False,
[int] $DownloadRetries = 5,
[int] $RetryWaitTimeInSeconds = 30,
[string] $GlobalJsonFile
)
if (!$GlobalJsonFile) {
$GlobalJsonFile = Join-Path (Get-Item $PSScriptRoot).Parent.Parent.FullName 'global.json'
}
Set-StrictMode -version 2.0
$ErrorActionPreference='Stop'
. $PSScriptRoot\pipeline-logging-functions.ps1
Import-Module -Name (Join-Path $PSScriptRoot 'native\CommonLibrary.psm1')
try {
# Define verbose switch if undefined
$Verbose = $VerbosePreference -Eq 'Continue'
$EngCommonBaseDir = Join-Path $PSScriptRoot 'native\'
$NativeBaseDir = $InstallDirectory
if (!$NativeBaseDir) {
$NativeBaseDir = CommonLibrary\Get-NativeInstallDirectory
}
$Env:CommonLibrary_NativeInstallDir = $NativeBaseDir
$InstallBin = Join-Path $NativeBaseDir 'bin'
$InstallerPath = Join-Path $EngCommonBaseDir 'install-tool.ps1'
# Process tools list
Write-Host "Processing $GlobalJsonFile"
If (-Not (Test-Path $GlobalJsonFile)) {
Write-Host "Unable to find '$GlobalJsonFile'"
exit 0
}
$NativeTools = Get-Content($GlobalJsonFile) -Raw |
ConvertFrom-Json |
Select-Object -Expand 'native-tools' -ErrorAction SilentlyContinue
if ($NativeTools) {
$NativeTools.PSObject.Properties | ForEach-Object {
$ToolName = $_.Name
$ToolVersion = $_.Value
$LocalInstallerArguments = @{ ToolName = "$ToolName" }
$LocalInstallerArguments += @{ InstallPath = "$InstallBin" }
$LocalInstallerArguments += @{ BaseUri = "$BaseUri" }
$LocalInstallerArguments += @{ CommonLibraryDirectory = "$EngCommonBaseDir" }
$LocalInstallerArguments += @{ Version = "$ToolVersion" }
if ($Verbose) {
$LocalInstallerArguments += @{ Verbose = $True }
}
if (Get-Variable 'Force' -ErrorAction 'SilentlyContinue') {
if($Force) {
$LocalInstallerArguments += @{ Force = $True }
}
}
if ($Clean) {
$LocalInstallerArguments += @{ Clean = $True }
}
Write-Verbose "Installing $ToolName version $ToolVersion"
Write-Verbose "Executing '$InstallerPath $($LocalInstallerArguments.Keys.ForEach({"-$_ '$($LocalInstallerArguments.$_)'"}) -join ' ')'"
& $InstallerPath @LocalInstallerArguments
if ($LASTEXITCODE -Ne "0") {
$errMsg = "$ToolName installation failed"
if ((Get-Variable 'DoNotAbortNativeToolsInstallationOnFailure' -ErrorAction 'SilentlyContinue') -and $DoNotAbortNativeToolsInstallationOnFailure) {
$showNativeToolsWarning = $true
if ((Get-Variable 'DoNotDisplayNativeToolsInstallationWarnings' -ErrorAction 'SilentlyContinue') -and $DoNotDisplayNativeToolsInstallationWarnings) {
$showNativeToolsWarning = $false
}
if ($showNativeToolsWarning) {
Write-Warning $errMsg
}
$toolInstallationFailure = $true
} else {
# We cannot change this to Write-PipelineTelemetryError because of https://github.com/dotnet/arcade/issues/4482
Write-Host $errMsg
exit 1
}
}
}
if ((Get-Variable 'toolInstallationFailure' -ErrorAction 'SilentlyContinue') -and $toolInstallationFailure) {
# We cannot change this to Write-PipelineTelemetryError because of https://github.com/dotnet/arcade/issues/4482
Write-Host 'Native tools bootstrap failed'
exit 1
}
}
else {
Write-Host 'No native tools defined in global.json'
exit 0
}
if ($Clean) {
exit 0
}
if (Test-Path $InstallBin) {
Write-Host 'Native tools are available from ' (Convert-Path -Path $InstallBin)
Write-Host "##vso[task.prependpath]$(Convert-Path -Path $InstallBin)"
return $InstallBin
}
else {
Write-PipelineTelemetryError -Category 'NativeToolsBootstrap' -Message 'Native tools install directory does not exist, installation failed'
exit 1
}
exit 0
}
catch {
Write-Host $_.ScriptStackTrace
Write-PipelineTelemetryError -Category 'NativeToolsBootstrap' -Message $_
ExitWithExitCode 1
}
| -1 |
dotnet/runtime | 66,248 | ./src/tests/JIT/Directed/PREFIX/unaligned/1/initobj.ilproj
<Project Sdk="Microsoft.NET.Sdk.IL">
<PropertyGroup>
<OutputType>Exe</OutputType>
<CLRTestPriority>1</CLRTestPriority>
</PropertyGroup>
<PropertyGroup>
<DebugType>PdbOnly</DebugType>
<Optimize>True</Optimize>
</PropertyGroup>
<ItemGroup>
<Compile Include="initobj.il" />
</ItemGroup>
</Project>
| -1 |
dotnet/runtime | 66,248 | ./src/tests/JIT/CodeGenBringUpTests/Args5.cs
// Licensed to the .NET Foundation under one or more agreements.
// The .NET Foundation licenses this file to you under the MIT license.
//
using System;
using System.Runtime.CompilerServices;
public class BringUpTest_Args5
{
const int Pass = 100;
const int Fail = -1;
[MethodImplAttribute(MethodImplOptions.NoInlining)]
public static int Args5(int a, int b, int c, int d, int e)
{
return a+b+c+d+e;
}
public static int Main()
{
int y = Args5(1,2,3,4,5);
if (y == 15) return Pass;
else return Fail;
}
}
| -1 |
dotnet/runtime | 66,248 | ./src/tests/JIT/Regression/CLR-x86-JIT/V1-M12-Beta2/b75250/b75250.cs
// Licensed to the .NET Foundation under one or more agreements.
// The .NET Foundation licenses this file to you under the MIT license.
//
using System;
class testout1
{
public struct VT
{
public double a1;
public double a2;
public long a3;
public double a4;
public double a5;
public long a6;
public long a7;
}
public class CL
{
public int a0 = 5;
}
public static VT vtstatic = new VT();
public static long Func(CL cl, VT vt)
{
vtstatic.a1 = 18;
vtstatic.a2 = 2;
vtstatic.a3 = 5L;
vtstatic.a4 = 35;
vtstatic.a5 = 8;
vtstatic.a6 = -6L;
vtstatic.a7 = 1L;
long retval = Convert.ToInt64((((long)(Convert.ToInt32(cl.a0 / vtstatic.a5) + (long)(Convert.ToInt32(57) - (long)(-70L))) + (long)(vt.a6 * vt.a4)) + (long)((long)(Convert.ToInt32(1787522586) - (long)((vtstatic.a3 + (long)(Convert.ToInt32(1787522586) - (long)(56L))))) * (vt.a4 - vtstatic.a1)) - (long)(vtstatic.a7 * vt.a2)));
return retval;
}
public static int Main()
{
VT vt = new VT();
vt.a1 = 5;
vt.a2 = 1;
vt.a3 = 4L;
vt.a4 = 3;
vt.a5 = 2;
vt.a6 = -1L;
vt.a7 = 6L;
CL cl = new CL();
long val = Func(cl, vt);
return 100;
}
}
| -1 |
dotnet/runtime | 66,248 | ./src/libraries/System.Private.Xml/src/System/Xml/Base64EncoderAsync.cs
// Licensed to the .NET Foundation under one or more agreements.
// The .NET Foundation licenses this file to you under the MIT license.
using System.Text;
using System.Diagnostics;
using System.Threading.Tasks;
namespace System.Xml
{
internal abstract partial class Base64Encoder
{
internal abstract Task WriteCharsAsync(char[] chars, int index, int count);
internal async Task EncodeAsync(byte[] buffer!!, int index, int count)
{
if (index < 0)
{
throw new ArgumentOutOfRangeException(nameof(index));
}
if (count < 0)
{
throw new ArgumentOutOfRangeException(nameof(count));
}
if (count > buffer.Length - index)
{
throw new ArgumentOutOfRangeException(nameof(count));
}
// encode left-over buffer
if (_leftOverBytesCount > 0)
{
int i = _leftOverBytesCount;
while (i < 3 && count > 0)
{
_leftOverBytes![i++] = buffer[index++];
count--;
}
// the total number of bytes we have is less than 3 -> return
if (count == 0 && i < 3)
{
_leftOverBytesCount = i;
return;
}
// encode the left-over buffer and write out
int leftOverChars = Convert.ToBase64CharArray(_leftOverBytes!, 0, 3, _charsLine, 0);
await WriteCharsAsync(_charsLine, 0, leftOverChars).ConfigureAwait(false);
}
// store new left-over buffer
_leftOverBytesCount = count % 3;
if (_leftOverBytesCount > 0)
{
count -= _leftOverBytesCount;
if (_leftOverBytes == null)
{
_leftOverBytes = new byte[3];
}
for (int i = 0; i < _leftOverBytesCount; i++)
{
_leftOverBytes[i] = buffer[index + count + i];
}
}
// encode buffer in 76 character long chunks
int endIndex = index + count;
int chunkSize = LineSizeInBytes;
while (index < endIndex)
{
if (index + chunkSize > endIndex)
{
chunkSize = endIndex - index;
}
int charCount = Convert.ToBase64CharArray(buffer, index, chunkSize, _charsLine, 0);
await WriteCharsAsync(_charsLine, 0, charCount).ConfigureAwait(false);
index += chunkSize;
}
}
internal async Task FlushAsync()
{
if (_leftOverBytesCount > 0)
{
int leftOverChars = Convert.ToBase64CharArray(_leftOverBytes!, 0, _leftOverBytesCount, _charsLine, 0);
await WriteCharsAsync(_charsLine, 0, leftOverChars).ConfigureAwait(false);
_leftOverBytesCount = 0;
}
}
}
internal sealed partial class XmlTextWriterBase64Encoder : Base64Encoder
{
internal override Task WriteCharsAsync(char[] chars, int index, int count)
{
throw new NotImplementedException();
}
}
internal sealed partial class XmlRawWriterBase64Encoder : Base64Encoder
{
internal override Task WriteCharsAsync(char[] chars, int index, int count)
{
return _rawWriter.WriteRawAsync(chars, index, count);
}
}
}
| -1 |
dotnet/runtime | 66,248 | ./src/coreclr/pal/src/libunwind/src/unwind/GetBSP.c
/* libunwind - a platform-independent unwind library
Copyright (C) 2003-2004 Hewlett-Packard Co
Contributed by David Mosberger-Tang <[email protected]>
This file is part of libunwind.
Permission is hereby granted, free of charge, to any person obtaining
a copy of this software and associated documentation files (the
"Software"), to deal in the Software without restriction, including
without limitation the rights to use, copy, modify, merge, publish,
distribute, sublicense, and/or sell copies of the Software, and to
permit persons to whom the Software is furnished to do so, subject to
the following conditions:
The above copyright notice and this permission notice shall be
included in all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. */
#include "unwind-internal.h"
unsigned long
_Unwind_GetBSP (struct _Unwind_Context *context)
{
#ifdef UNW_TARGET_IA64
unw_word_t val;
unw_get_reg (&context->cursor, UNW_IA64_BSP, &val);
return val;
#else
return 0;
#endif
}
unsigned long __libunwind_Unwind_GetBSP (struct _Unwind_Context *)
ALIAS (_Unwind_GetBSP);
| -1 |
dotnet/runtime | 66,248 | ./src/tests/JIT/Regression/CLR-x86-JIT/V1-M11-Beta1/b42009/b42009.cs
// Licensed to the .NET Foundation under one or more agreements.
// The .NET Foundation licenses this file to you under the MIT license.
//
namespace Test
{
using System;
class BB
{
static int Static1(long arg1, ref int[] arg2, int[] arg3, int arg4)
{ return 0; }
static void Static2(ref int[] arg)
{
Static1(
Static1(0, ref arg, arg, arg[0]),
ref arg,
arg,
arg[Static1(0, ref arg, arg, arg[0])]
);
}
static int Main()
{
int[] an = new int[2];
Static2(ref an);
return 100;
}
}
}
| -1 |
dotnet/runtime | 66,248 | ./src/libraries/System.Private.CoreLib/src/System/Runtime/CompilerServices/CallerMemberNameAttribute.cs
// Licensed to the .NET Foundation under one or more agreements.
// The .NET Foundation licenses this file to you under the MIT license.
namespace System.Runtime.CompilerServices
{
[AttributeUsage(AttributeTargets.Parameter, Inherited = false)]
public sealed class CallerMemberNameAttribute : Attribute
{
public CallerMemberNameAttribute()
{
}
}
}
| -1 |
dotnet/runtime | 66,248 | ./src/tests/JIT/jit64/opt/rngchk/SimpleArray_01.cs
// Licensed to the .NET Foundation under one or more agreements.
// The .NET Foundation licenses this file to you under the MIT license.
using System;
using System.Runtime.CompilerServices;
namespace SimpleArray_01
{
public delegate void RngTest();
internal class Class1
{
private static int Main()
{
int retVal = 100;
int testNum = 0;
RngTest[] Tests ={ new RngTest(Test.Test1),
new RngTest(Test.Test2),
new RngTest(Test.Test3),
new RngTest(Test.Test4),
new RngTest(Test.Test5),
new RngTest(Test.Test6)};
foreach (RngTest test in Tests)
{
testNum++;
if (DoTest(test))
{
Console.WriteLine("Test {0} Passed", testNum);
}
else
{
Console.WriteLine("Test {0} Failed", testNum);
retVal = 1;
}
}
return retVal;
}
//Test shall throw IndexOutOfRangeException if rangecheck is inserted properly
private static bool DoTest(RngTest Test)
{
bool bResult = false;
try
{
Test();
}
catch (System.IndexOutOfRangeException)
{
bResult = true;
}
catch (Exception e)
{
Console.WriteLine(e.Message);
}
return bResult;
}
}
internal class Test
{
/********************************************************************************************
* RngChk shall not be eliminated when direct access to an out bound element within the loop
*********************************************************************************************/
[MethodImplAttribute(MethodImplOptions.NoInlining)]
public static void Test1()
{
int[] numbers = new int[100];
int index = 0;
for (index = 0; index < numbers.Length; index++)
{
numbers[101] = index * index;
}
}
/********************************************************************************************
* RngChk shall not be eliminated if the loop upper limit is larger than the array bound
********************************************************************************************/
[MethodImplAttribute(MethodImplOptions.NoInlining)]
public static void Test2()
{
int[] numbers = new int[100];
int index = 0;
for (index = 0; index < 101; index++)
{
numbers[index] = index * index;
}
}
/********************************************************************************************
* RngChk is eliminated properly when reverse iterate through the array
********************************************************************************************/
[MethodImplAttribute(MethodImplOptions.NoInlining)]
public static void Test3()
{
int[] numbers = new int[100];
int index = 0;
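// Note: the first iteration writes numbers[numbers.Length] (index 100), which is out of range, so the expected IndexOutOfRangeException is thrown.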
for (index = numbers.Length; index >= 0; index--)
{
numbers[index] = index * index;
}
}
/********************************************************************************************
* RngChk is not eliminated if the array is modified
********************************************************************************************/
[MethodImplAttribute(MethodImplOptions.NoInlining)]
public static void Test4()
{
int[] numbers = new int[100];
int[] numbers2 = new int[99];
int index = 0;
for (index = 0; index < numbers.Length; index++)
{
if (index > 98)
numbers = numbers2;
numbers[index] = index * index;
}
}
/********************************************************************************************
* RngChk is not eliminated if the upper limit of the array is modified
********************************************************************************************/
[MethodImplAttribute(MethodImplOptions.NoInlining)]
public static void Test5()
{
int[] numbers;
int index = 0;
numbers = new int[100];
int upper = 99;
for (index = 0; index < upper; index++, upper++)
{
numbers[index] = index * index;
}
}
/********************************************************************************************
* RngChk is not eliminated if induction variable is modified
********************************************************************************************/
[MethodImplAttribute(MethodImplOptions.NoInlining)]
public static void Test6()
{
int[] numbers;
int index = 0;
numbers = new int[101];
for (index = 0; index++ < numbers.Length; index++)
{
numbers[index] = index * index;
}
}
}
}
| -1 |
dotnet/runtime | 66,248 | ./src/coreclr/tools/aot/ILCompiler.ReadyToRun/Compiler/DependencyAnalysis/ReadyToRun/RuntimeFunctionsTableNode.cs
// Licensed to the .NET Foundation under one or more agreements.
// The .NET Foundation licenses this file to you under the MIT license.
using System;
using System.Collections.Generic;
using Internal.Text;
using Internal.TypeSystem;
using Debug = System.Diagnostics.Debug;
namespace ILCompiler.DependencyAnalysis.ReadyToRun
{
public class RuntimeFunctionsTableNode : HeaderTableNode
{
private List<MethodWithGCInfo> _methodNodes;
private Dictionary<MethodWithGCInfo, int> _insertedMethodNodes;
private readonly NodeFactory _nodeFactory;
private int _tableSize = -1;
public RuntimeFunctionsTableNode(NodeFactory nodeFactory)
: base(nodeFactory.Target)
{
_nodeFactory = nodeFactory;
}
public override void AppendMangledName(NameMangler nameMangler, Utf8StringBuilder sb)
{
sb.Append(nameMangler.CompilationUnitPrefix);
sb.Append("__ReadyToRunRuntimeFunctionsTable");
}
public int GetIndex(MethodWithGCInfo method)
{
#if DEBUG
Debug.Assert(_nodeFactory.MarkingComplete);
Debug.Assert(method.Marked);
#endif
if (_methodNodes == null)
LayoutRuntimeFunctions();
return _insertedMethodNodes[method];
}
private void LayoutRuntimeFunctions()
{
_methodNodes = new List<MethodWithGCInfo>();
_insertedMethodNodes = new Dictionary<MethodWithGCInfo, int>();
int runtimeFunctionIndex = 0;
foreach (MethodWithGCInfo method in _nodeFactory.EnumerateCompiledMethods())
{
_methodNodes.Add(method);
_insertedMethodNodes[method] = runtimeFunctionIndex;
runtimeFunctionIndex += method.FrameInfos.Length;
}
}
public override ObjectData GetData(NodeFactory factory, bool relocsOnly = false)
{
// This node does not trigger generation of other nodes.
if (relocsOnly)
return new ObjectData(Array.Empty<byte>(), Array.Empty<Relocation>(), 1, new ISymbolDefinitionNode[] { this });
if (_methodNodes == null)
LayoutRuntimeFunctions();
ObjectDataBuilder runtimeFunctionsBuilder = new ObjectDataBuilder(factory, relocsOnly);
runtimeFunctionsBuilder.RequireInitialAlignment(4);
// Add the symbol representing this object node
runtimeFunctionsBuilder.AddSymbol(this);
foreach (MethodWithGCInfo method in _methodNodes)
{
int[] funcletOffsets = method.GCInfoNode.CalculateFuncletOffsets(factory);
for (int frameIndex = 0; frameIndex < method.FrameInfos.Length; frameIndex++)
{
FrameInfo frameInfo = method.FrameInfos[frameIndex];
// StartOffset of the runtime function
int codeDelta = 0;
if (Target.Architecture == TargetArchitecture.ARM)
{
// THUMB_CODE
codeDelta = 1;
}
runtimeFunctionsBuilder.EmitReloc(method, RelocType.IMAGE_REL_BASED_ADDR32NB, delta: frameInfo.StartOffset + codeDelta);
if (!relocsOnly && Target.Architecture == TargetArchitecture.X64)
{
// On Amd64, the 2nd word contains the EndOffset of the runtime function
runtimeFunctionsBuilder.EmitReloc(method, RelocType.IMAGE_REL_BASED_ADDR32NB, delta: frameInfo.EndOffset);
}
runtimeFunctionsBuilder.EmitReloc(factory.RuntimeFunctionsGCInfo.StartSymbol, RelocType.IMAGE_REL_BASED_ADDR32NB, funcletOffsets[frameIndex]);
}
}
// Emit sentinel entry
runtimeFunctionsBuilder.EmitUInt(~0u);
_tableSize = runtimeFunctionsBuilder.CountBytes;
return runtimeFunctionsBuilder.ToObjectData();
}
/// <summary>
/// Returns the runtime functions table size and excludes the 4 byte sentinel entry at the end (used by
/// the runtime in NativeUnwindInfoLookupTable::LookupUnwindInfoForMethod) so that it's not treated as
/// part of the table itself.
/// </summary>
public int TableSizeExcludingSentinel
{
get
{
Debug.Assert(_tableSize >= 0);
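// _tableSize is recorded in GetData after the 4-byte sentinel entry has been emitted; SentinelSizeAdjustment (-4) excludes that sentinel from the reported size.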
return _tableSize + SentinelSizeAdjustment;
}
}
public override int ClassCode => -855231428;
internal const int SentinelSizeAdjustment = -4;
}
}
| -1 |
dotnet/runtime | 66,248 | ./src/coreclr/jit/emitpub.h
// Licensed to the .NET Foundation under one or more agreements.
// The .NET Foundation licenses this file to you under the MIT license.
/************************************************************************/
/* Overall emitter control (including startup and shutdown) */
/************************************************************************/
static void emitInit();
static void emitDone();
void emitBegCG(Compiler* comp, COMP_HANDLE cmpHandle);
void emitEndCG();
void emitBegFN(bool hasFramePtr
#if defined(DEBUG)
,
bool checkAlign
#endif
,
unsigned maxTmpSize);
void emitEndFN();
void emitComputeCodeSizes();
unsigned emitEndCodeGen(Compiler* comp,
bool contTrkPtrLcls,
bool fullyInt,
bool fullPtrMap,
unsigned xcptnsCount,
unsigned* prologSize,
unsigned* epilogSize,
void** codeAddr,
void** coldCodeAddr,
void** consAddr DEBUGARG(unsigned* instrCount));
/************************************************************************/
/* Method prolog and epilog */
/************************************************************************/
unsigned emitGetEpilogCnt();
template <typename Callback>
bool emitGenNoGCLst(Callback& cb);
void emitBegProlog();
unsigned emitGetPrologOffsetEstimate();
void emitMarkPrologEnd();
void emitEndProlog();
void emitCreatePlaceholderIG(insGroupPlaceholderType igType,
BasicBlock* igBB,
VARSET_VALARG_TP GCvars,
regMaskTP gcrefRegs,
regMaskTP byrefRegs,
bool last);
void emitGeneratePrologEpilog();
void emitStartPrologEpilogGeneration();
void emitFinishPrologEpilogGeneration();
/************************************************************************/
/* Record a code position and later convert it to offset */
/************************************************************************/
void* emitCurBlock();
unsigned emitCurOffset();
UNATIVE_OFFSET emitCodeOffset(void* blockPtr, unsigned codeOffs);
#ifdef DEBUG
const char* emitOffsetToLabel(unsigned offs);
#endif // DEBUG
/************************************************************************/
/* Output target-independent instructions */
/************************************************************************/
void emitIns_J(instruction ins, BasicBlock* dst, int instrCount = 0);
/************************************************************************/
/* Emit initialized data sections */
/************************************************************************/
UNATIVE_OFFSET emitDataGenBeg(unsigned size, unsigned alignment, var_types dataType);
UNATIVE_OFFSET emitBBTableDataGenBeg(unsigned numEntries, bool relativeAddr);
void emitDataGenData(unsigned offs, const void* data, UNATIVE_OFFSET size);
void emitDataGenData(unsigned offs, BasicBlock* label);
void emitDataGenEnd();
static const UNATIVE_OFFSET INVALID_UNATIVE_OFFSET = (UNATIVE_OFFSET)-1;
UNATIVE_OFFSET emitDataGenFind(const void* cnsAddr, unsigned size, unsigned alignment, var_types dataType);
UNATIVE_OFFSET emitDataConst(const void* cnsAddr, unsigned cnsSize, unsigned cnsAlign, var_types dataType);
UNATIVE_OFFSET emitDataSize();
/************************************************************************/
/* Instruction information */
/************************************************************************/
#ifdef TARGET_XARCH
static bool instrIs3opImul(instruction ins);
static bool instrIsExtendedReg3opImul(instruction ins);
static bool instrHasImplicitRegPairDest(instruction ins);
static void check3opImulValues();
static regNumber inst3opImulReg(instruction ins);
static instruction inst3opImulForReg(regNumber reg);
#endif
/************************************************************************/
/* Emit PDB offset translation information */
/************************************************************************/
#ifdef TRANSLATE_PDB
static void SetILBaseOfCode(BYTE* pTextBase);
static void SetILMethodBase(BYTE* pMethodEntry);
static void SetILMethodStart(BYTE* pMethodCode);
static void SetImgBaseOfCode(BYTE* pTextBase);
void SetIDBaseToProlog();
void SetIDBaseToOffset(int methodOffset);
static void DisablePDBTranslation();
static bool IsPDBEnabled();
static void InitTranslationMaps(int ilCodeSize);
static void DeleteTranslationMaps();
static void InitTranslator(PDBRewriter* pPDB, int* rgSecMap, IMAGE_SECTION_HEADER** rgpHeader, int numSections);
#endif
/************************************************************************/
/* Interface for generating unwind information */
/************************************************************************/
#ifdef TARGET_ARMARCH
bool emitIsFuncEnd(emitLocation* emitLoc, emitLocation* emitLocNextFragment = NULL);
void emitSplit(emitLocation* startLoc,
emitLocation* endLoc,
UNATIVE_OFFSET maxSplitSize,
void* context,
emitSplitCallbackType callbackFunc);
void emitUnwindNopPadding(emitLocation* locFrom, Compiler* comp);
#endif // TARGET_ARMARCH
#if defined(TARGET_ARM)
unsigned emitGetInstructionSize(emitLocation* emitLoc);
#endif // defined(TARGET_ARM)
| -1 |
dotnet/runtime | 66,248 | ./src/coreclr/nativeaot/Runtime/gcrhinterface.h
// Licensed to the .NET Foundation under one or more agreements.
// The .NET Foundation licenses this file to you under the MIT license.
//
// This header contains the definition of an interface between the GC/HandleTable portions of the Redhawk
// codebase and the regular Redhawk code. The former has all sorts of legacy environmental requirements (see
// gcrhenv.h) that we don't wish to pull into the rest of Redhawk.
//
// Since this file is included in both worlds it has no dependencies and uses a very simple subset of types
// etc. so that it will build cleanly in both. The actual implementation of the class defined here is in
// gcrhenv.cpp, since the implementation needs access to the guts of the GC/HandleTable.
//
// This is just an initial stab at the interface.
//
#ifndef __GCRHINTERFACE_INCLUDED
#define __GCRHINTERFACE_INCLUDED
#ifndef DACCESS_COMPILE
// Global data cells exported by the GC.
extern "C" unsigned char *g_ephemeral_low;
extern "C" unsigned char *g_ephemeral_high;
extern "C" unsigned char *g_lowest_address;
extern "C" unsigned char *g_highest_address;
#endif
struct gc_alloc_context;
class MethodInfo;
struct REGDISPLAY;
class Thread;
enum GCRefKind : unsigned char;
class ICodeManager;
class MethodTable;
// -----------------------------------------------------------------------------------------------------------
// RtuObjectRef
// -----------------------------------------------------------------------------------------------------------
//
// READ THIS!
//
// This struct exists for type description purposes, but you must never directly refer to the object
// reference. The only code allowed to do this is the code inherited directly from the CLR, which all
// includes gcrhenv.h. If your code is outside the namespace of gcrhenv.h, direct object reference
// manipulation is prohibited--use C# instead.
//
// To enforce this, we declare RtuObjectRef as a class with no public members.
//
class RtuObjectRef
{
#ifndef DACCESS_COMPILE
private:
#else
public:
#endif
TADDR pvObject;
};
typedef DPTR(RtuObjectRef) PTR_RtuObjectRef;
// -----------------------------------------------------------------------------------------------------------
// We provide various ways to enumerate GC objects or roots, each of which calls back to a user supplied
// function for each object (within the context of a garbage collection). The following function types
// describe these callbacks. Unfortunately the signatures aren't very specific: we don't want to reference
// Object* or Object** from this module, see the comment for RtuObjectRef, but this very narrow category of
// callers can't use RtuObjectRef (they really do need to drill down into the Object). The lesser evil here is
// to be a bit loose in the signature rather than exposing the Object class to the rest of Redhawk.
// Callback when enumerating objects on the GC heap or objects referenced from instance fields of another
// object. The GC dictates the shape of this signature (we're hijacking functionality originally developed for
// profiling). The real signature is:
// int ScanFunction(Object* pObject, void* pContext)
// where:
// return : treated as a boolean, zero indicates the enumeration should terminate, all other values
// say continue
// pObject : pointer to the current object being scanned
// pContext : user context passed to the original scan function and otherwise uninterpreted
typedef int (*GcScanObjectFunction)(void*, void*);
// Callback when enumerating GC roots (stack locations, statics and handles). Similar to the callback above
// except there is no means to terminate the scan (no return value) and the root location (pointer to pointer
// to object) is returned instead of a direct pointer to the object:
// void ScanFunction(Object** pRoot, void* pContext)
typedef void (*GcScanRootFunction)(void**, void*);
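//
// Illustration only (not part of this interface): a callback matching GcScanRootFunction that simply
// counts roots could look like the sketch below. The names are hypothetical.
//
// static void CountRoot(void** pRoot, void* pContext)
// {
//     ++*(size_t*)pContext; // pRoot is the root location (pointer to pointer to object); don't touch the object here
// }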
typedef void * GcSegmentHandle;
#define RH_LARGE_OBJECT_SIZE 85000
// A 'clump' is defined as the size of memory covered by 1 byte in the card table. These constants are
// verified against gcpriv.h in gcrhee.cpp.
#if (POINTER_SIZE == 8)
#define CLUMP_SIZE 0x800
#define LOG2_CLUMP_SIZE 11
#elif (POINTER_SIZE == 4)
#define CLUMP_SIZE 0x400
#define LOG2_CLUMP_SIZE 10
#else
#error unexpected pointer size
#endif
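// Note: by definition CLUMP_SIZE == (1 << LOG2_CLUMP_SIZE), i.e. 256 * POINTER_SIZE on both targets,
// so a single card table byte covers one clump of CLUMP_SIZE bytes of heap.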
class RedhawkGCInterface
{
public:
// Perform any runtime-startup initialization needed by the GC, HandleTable or environmental code in
// gcrhenv. Returns true on success or false if a subsystem failed to initialize.
static bool InitializeSubsystems();
static void InitAllocContext(gc_alloc_context * pAllocContext);
static void ReleaseAllocContext(gc_alloc_context * pAllocContext);
static void WaitForGCCompletion();
static void EnumGcRef(PTR_RtuObjectRef pRef, GCRefKind kind, void * pfnEnumCallback, void * pvCallbackData);
static void BulkEnumGcObjRef(PTR_RtuObjectRef pRefs, uint32_t cRefs, void * pfnEnumCallback, void * pvCallbackData);
static void EnumGcRefs(ICodeManager * pCodeManager,
MethodInfo * pMethodInfo,
PTR_VOID safePointAddress,
REGDISPLAY * pRegisterSet,
void * pfnEnumCallback,
void * pvCallbackData);
static void EnumGcRefsInRegionConservatively(PTR_RtuObjectRef pLowerBound,
PTR_RtuObjectRef pUpperBound,
void * pfnEnumCallback,
void * pvCallbackData);
static GcSegmentHandle RegisterFrozenSegment(void * pSection, size_t SizeSection);
static void UnregisterFrozenSegment(GcSegmentHandle segment);
#ifdef FEATURE_GC_STRESS
static void StressGc();
#endif // FEATURE_GC_STRESS
// Various routines used to enumerate objects contained within a given scope (on the GC heap, as reference
// fields of an object, on a thread stack, in a static or in one of the handle tables).
static void ScanObject(void *pObject, GcScanObjectFunction pfnScanCallback, void *pContext);
static void ScanStackRoots(Thread *pThread, GcScanRootFunction pfnScanCallback, void *pContext);
static void ScanStaticRoots(GcScanRootFunction pfnScanCallback, void *pContext);
static void ScanHandleTableRoots(GcScanRootFunction pfnScanCallback, void *pContext);
// Returns the size of the GCDesc. Used by type cloning.
static uint32_t GetGCDescSize(void * pType);
// These methods are used to get and set the type information for the last allocation on each thread.
static MethodTable * GetLastAllocEEType();
static void SetLastAllocEEType(MethodTable *pEEType);
static uint64_t GetDeadThreadsNonAllocBytes();
// Used by debugger hook
static void* CreateTypedHandle(void* object, int type);
static void DestroyTypedHandle(void* handle);
private:
// The MethodTable for the last allocation. This value is used inside of the GC allocator
// to emit allocation ETW events with type information. We set this value unconditionally to avoid
// race conditions where ETW is enabled after the value is set.
static DECLSPEC_THREAD MethodTable * tls_pLastAllocationEEType;
// Tracks the number of bytes that were reserved for threads in their gc_alloc_context and went unused when they died.
// Used for GC.GetTotalAllocatedBytes
static uint64_t s_DeadThreadsNonAllocBytes;
};
#endif // __GCRHINTERFACE_INCLUDED
| -1 |
dotnet/runtime | 66,248 | Fix issues related to JsonSerializerOptions mutation and caching. | Second attempt at merging https://github.com/dotnet/runtime/pull/65863. Original PR introduced test failures in netfx and was reverted in https://github.com/dotnet/runtime/pull/66235.
cc @jkotas | eiriktsarpalis | 2022-03-05T19:59:48Z | 2022-03-07T19:44:14Z | 44ec3c9474b3a93eb4f71791a43d8bb5c0b08a58 | 8b4eaf94140f488743d0c78caa3afec3b9c5d789 | Fix issues related to JsonSerializerOptions mutation and caching.. Second attempt at merging https://github.com/dotnet/runtime/pull/65863. Original PR introduced test failures in netfx and was reverted in https://github.com/dotnet/runtime/pull/66235.
cc @jkotas | ./src/coreclr/nativeaot/System.Private.TypeLoader/src/Internal/Runtime/TypeLoader/TypeBuilder.cs | // Licensed to the .NET Foundation under one or more agreements.
// The .NET Foundation licenses this file to you under the MIT license.
using System;
using System.Collections.Generic;
using System.Diagnostics;
using System.Reflection;
using System.Runtime;
using System.Text;
using System.Reflection.Runtime.General;
using Internal.Runtime.Augments;
using Internal.Runtime.CompilerServices;
using Internal.Metadata.NativeFormat;
using Internal.NativeFormat;
using Internal.TypeSystem;
using Internal.TypeSystem.NativeFormat;
using Internal.TypeSystem.NoMetadata;
namespace Internal.Runtime.TypeLoader
{
using DynamicGenericsRegistrationData = TypeLoaderEnvironment.DynamicGenericsRegistrationData;
using GenericTypeEntry = TypeLoaderEnvironment.GenericTypeEntry;
using TypeEntryToRegister = TypeLoaderEnvironment.TypeEntryToRegister;
using GenericMethodEntry = TypeLoaderEnvironment.GenericMethodEntry;
using HandleBasedGenericTypeLookup = TypeLoaderEnvironment.HandleBasedGenericTypeLookup;
using DefTypeBasedGenericTypeLookup = TypeLoaderEnvironment.DefTypeBasedGenericTypeLookup;
using HandleBasedGenericMethodLookup = TypeLoaderEnvironment.HandleBasedGenericMethodLookup;
using MethodDescBasedGenericMethodLookup = TypeLoaderEnvironment.MethodDescBasedGenericMethodLookup;
using ThunkKind = CallConverterThunk.ThunkKind;
using VTableSlotMapper = TypeBuilderState.VTableSlotMapper;
internal static class LowLevelListExtensions
{
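/// <summary>
/// Grows the list to at least <paramref name="count"/> elements, padding with default(T). This is
/// used by the GC layout code further down when bitfields are written at arbitrary offsets.
/// </summary>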
public static void Expand<T>(this LowLevelList<T> list, int count)
{
if (list.Capacity < count)
list.Capacity = count;
while (list.Count < count)
list.Add(default(T));
}
public static bool HasSetBits(this LowLevelList<bool> list)
{
for (int index = 0; index < list.Count; index++)
{
if (list[index])
return true;
}
return false;
}
}
[Flags]
internal enum FieldLoadState
{
None = 0,
Instance = 1,
Statics = 2,
}
public static class TypeBuilderApi
{
public static void ResolveMultipleCells(GenericDictionaryCell [] cells, out IntPtr[] fixups)
{
TypeBuilder.ResolveMultipleCells(cells, out fixups);
}
}
internal class TypeBuilder
{
public TypeBuilder()
{
TypeLoaderEnvironment.Instance.VerifyTypeLoaderLockHeld();
}
private const int MinimumValueTypeSize = 0x1;
/// <summary>
/// The StaticClassConstructionContext for a type is encoded in the negative space
/// of the NonGCStatic fields of a type.
/// </summary>
public static unsafe readonly int ClassConstructorOffset = -sizeof(System.Runtime.CompilerServices.StaticClassConstructionContext);
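// Sketch of how ClassConstructorOffset is consumed (see FinishClassConstructor below): the cctor slot
// of a dynamically built type sits just below its non-GC statics block, so it is addressed roughly as
// *(IntPtr*)((byte*)nonGcStaticsBase + ClassConstructorOffset), where nonGcStaticsBase is an
// illustrative name for the type's non-GC static data pointer.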
private LowLevelList<TypeDesc> _typesThatNeedTypeHandles = new LowLevelList<TypeDesc>();
private LowLevelList<InstantiatedMethod> _methodsThatNeedDictionaries = new LowLevelList<InstantiatedMethod>();
private LowLevelList<TypeDesc> _typesThatNeedPreparation;
private object _epoch = new object();
#if DEBUG
private bool _finalTypeBuilding;
#endif
// Helper exception to abort type building if we do not find the generic type template
internal class MissingTemplateException : Exception
{
}
private bool CheckAllHandlesValidForMethod(MethodDesc method)
{
if (!method.OwningType.RetrieveRuntimeTypeHandleIfPossible())
return false;
for (int i = 0; i < method.Instantiation.Length; i++)
if (!method.Instantiation[i].RetrieveRuntimeTypeHandleIfPossible())
return false;
return true;
}
internal bool RetrieveExactFunctionPointerIfPossible(MethodDesc method, out IntPtr result)
{
result = IntPtr.Zero;
if (!method.IsNonSharableMethod || !CheckAllHandlesValidForMethod(method))
return false;
RuntimeTypeHandle[] genMethodArgs = method.Instantiation.Length > 0 ? new RuntimeTypeHandle[method.Instantiation.Length] : Empty<RuntimeTypeHandle>.Array;
for (int i = 0; i < method.Instantiation.Length; i++)
genMethodArgs[i] = method.Instantiation[i].RuntimeTypeHandle;
return TypeLoaderEnvironment.Instance.TryLookupExactMethodPointerForComponents(method.OwningType.RuntimeTypeHandle, method.NameAndSignature, genMethodArgs, out result);
}
internal bool RetrieveMethodDictionaryIfPossible(InstantiatedMethod method)
{
if (method.RuntimeMethodDictionary != IntPtr.Zero)
return true;
bool allHandlesValid = CheckAllHandlesValidForMethod(method);
TypeLoaderLogger.WriteLine("Looking for method dictionary for method " + method.ToString() + " ... " + (allHandlesValid ? "(All type arg handles valid)" : ""));
IntPtr methodDictionary;
if ((allHandlesValid && TypeLoaderEnvironment.Instance.TryLookupGenericMethodDictionaryForComponents(new HandleBasedGenericMethodLookup(method), out methodDictionary)) ||
(!allHandlesValid && TypeLoaderEnvironment.Instance.TryLookupGenericMethodDictionaryForComponents(new MethodDescBasedGenericMethodLookup(method), out methodDictionary)))
{
TypeLoaderLogger.WriteLine("Found DICT = " + methodDictionary.LowLevelToString() + " for method " + method.ToString());
method.AssociateWithRuntimeMethodDictionary(methodDictionary);
return true;
}
return false;
}
/// <summary>
/// Register the type for preparation. The preparation will be done once the current type is prepared.
/// This is the preferred way to get a dependent type prepared because it avoids issues with cycles and recursion.
/// </summary>
public void RegisterForPreparation(TypeDesc type)
{
TypeLoaderLogger.WriteLine("Register for preparation " + type.ToString() + " ...");
// If this type has type handle, do nothing and return
if (type.RetrieveRuntimeTypeHandleIfPossible())
return;
var state = type.GetOrCreateTypeBuilderState();
// If this type was already inspected, do nothing and return.
if (state.NeedsTypeHandle)
return;
state.NeedsTypeHandle = true;
if (type.IsCanonicalSubtype(CanonicalFormKind.Any))
return;
if (_typesThatNeedPreparation == null)
_typesThatNeedPreparation = new LowLevelList<TypeDesc>();
_typesThatNeedPreparation.Add(type);
}
/// <summary>
/// Collects all dependencies that need to be created in order to create
/// the method that was passed in.
/// </summary>
public void PrepareMethod(MethodDesc method)
{
TypeLoaderLogger.WriteLine("Preparing method " + method.ToString() + " ...");
RegisterForPreparation(method.OwningType);
if (method.Instantiation.Length == 0)
return;
InstantiatedMethod genericMethod = (InstantiatedMethod)method;
if (RetrieveMethodDictionaryIfPossible(genericMethod))
return;
// If this method was already inspected, do nothing and return
if (genericMethod.NeedsDictionary)
return;
genericMethod.NeedsDictionary = true;
if (genericMethod.IsCanonicalMethod(CanonicalFormKind.Any))
return;
_methodsThatNeedDictionaries.Add(genericMethod);
foreach (var type in genericMethod.Instantiation)
RegisterForPreparation(type);
ParseNativeLayoutInfo(genericMethod);
}
private void InsertIntoNeedsTypeHandleList(TypeBuilderState state, TypeDesc type)
{
if ((type is DefType) || (type is ArrayType) || (type is PointerType) || (type is ByRefType))
{
_typesThatNeedTypeHandles.Add(type);
}
}
/// <summary>
/// Collects all dependencies that need to be created in order to create
/// the type that was passed in.
/// </summary>
internal void PrepareType(TypeDesc type)
{
TypeLoaderLogger.WriteLine("Preparing type " + type.ToString() + " ...");
TypeBuilderState state = type.GetTypeBuilderStateIfExist();
bool hasTypeHandle = type.RetrieveRuntimeTypeHandleIfPossible();
// If this type has type handle, do nothing and return unless we should prepare even in the presence of a type handle
if (hasTypeHandle)
return;
if (state == null)
state = type.GetOrCreateTypeBuilderState();
// If this type was already prepared, do nothing unless we are re-preparing it for the purpose of loading the field layout
if (state.HasBeenPrepared)
{
return;
}
state.HasBeenPrepared = true;
state.NeedsTypeHandle = true;
if (!hasTypeHandle)
{
InsertIntoNeedsTypeHandleList(state, type);
}
bool noExtraPreparation = false; // Set this to true for types which don't need other types to be prepared, i.e. GenericTypeDefinitions
if (type is DefType)
{
DefType typeAsDefType = (DefType)type;
if (typeAsDefType.HasInstantiation)
{
if (typeAsDefType.IsTypeDefinition)
{
noExtraPreparation = true;
}
else
{
// This call to ComputeTemplate will find the native layout info for the type, and the template
// For metadata loaded types, a template will not exist, but we may find the NativeLayout describing the generic dictionary
typeAsDefType.ComputeTemplate(state, false);
Debug.Assert(state.TemplateType == null || (state.TemplateType is DefType && !state.TemplateType.RuntimeTypeHandle.IsNull()));
// Collect dependencies
// We need the instantiation arguments to register a generic type
foreach (var instArg in typeAsDefType.Instantiation)
RegisterForPreparation(instArg);
// We need the type definition to register a generic type
if (type.GetTypeDefinition() is MetadataType)
RegisterForPreparation(type.GetTypeDefinition());
ParseNativeLayoutInfo(state, type);
}
}
if (!noExtraPreparation)
state.PrepareStaticGCLayout();
}
else if (type is ParameterizedType)
{
PrepareType(((ParameterizedType)type).ParameterType);
if (type is ArrayType)
{
ArrayType typeAsArrayType = (ArrayType)type;
if (typeAsArrayType.IsSzArray && !typeAsArrayType.ElementType.IsPointer)
{
typeAsArrayType.ComputeTemplate(state);
Debug.Assert(state.TemplateType != null && state.TemplateType is ArrayType && !state.TemplateType.RuntimeTypeHandle.IsNull());
ParseNativeLayoutInfo(state, type);
}
else
{
Debug.Assert(typeAsArrayType.IsMdArray || typeAsArrayType.ElementType.IsPointer);
}
// Assert that non-valuetypes are considered to have pointer size
Debug.Assert(typeAsArrayType.ParameterType.IsValueType || state.ComponentSize == IntPtr.Size);
}
}
else
{
Debug.Assert(false);
}
// Need to prepare the base type first since it is used to compute interfaces
if (!noExtraPreparation)
{
PrepareBaseTypeAndDictionaries(type);
PrepareRuntimeInterfaces(type);
TypeLoaderLogger.WriteLine("Layout for type " + type.ToString() + " complete." +
" IsHFA = " + (state.IsHFA ? "true" : "false") +
" Type size = " + (state.TypeSize.HasValue ? state.TypeSize.Value.LowLevelToString() : "UNDEF") +
" Fields size = " + (state.UnalignedTypeSize.HasValue ? state.UnalignedTypeSize.Value.LowLevelToString() : "UNDEF") +
" Type alignment = " + (state.FieldAlignment.HasValue ? state.FieldAlignment.Value.LowLevelToString() : "UNDEF"));
#if FEATURE_UNIVERSAL_GENERICS
if (state.TemplateType != null && state.TemplateType.IsCanonicalSubtype(CanonicalFormKind.Universal))
{
state.VTableSlotsMapping = new VTableSlotMapper(state.TemplateType.RuntimeTypeHandle.GetNumVtableSlots());
ComputeVTableLayout(type, state.TemplateType, state);
}
#endif
}
}
/// <summary>
/// Recursively triggers preparation for a type's runtime interfaces
/// </summary>
private void PrepareRuntimeInterfaces(TypeDesc type)
{
// Prepare all the interfaces that might be used. (This can be a superset of the
// interfaces explicitly in the NativeLayout.)
foreach (DefType interfaceType in type.RuntimeInterfaces)
{
PrepareType(interfaceType);
}
}
/// <summary>
/// Triggers preparation for a type's base types
/// </summary>
private void PrepareBaseTypeAndDictionaries(TypeDesc type)
{
DefType baseType = type.BaseType;
if (baseType == null)
return;
PrepareType(baseType);
}
private void ProcessTypesNeedingPreparation()
{
// Process the pending types
while (_typesThatNeedPreparation != null)
{
var pendingTypes = _typesThatNeedPreparation;
_typesThatNeedPreparation = null;
for (int i = 0; i < pendingTypes.Count; i++)
PrepareType(pendingTypes[i]);
}
}
private GenericDictionaryCell[] GetGenericMethodDictionaryCellsForMetadataBasedLoad(InstantiatedMethod method, InstantiatedMethod nonTemplateMethod)
{
#if SUPPORTS_NATIVE_METADATA_TYPE_LOADING
uint r2rNativeLayoutInfoToken;
GenericDictionaryCell[] cells = null;
NativeFormatModuleInfo r2rNativeLayoutModuleInfo;
if ((new TemplateLocator()).TryGetMetadataNativeLayout(nonTemplateMethod, out r2rNativeLayoutModuleInfo, out r2rNativeLayoutInfoToken))
{
// ReadyToRun dictionary parsing
NativeReader readyToRunReader = TypeLoaderEnvironment.Instance.GetNativeLayoutInfoReader(r2rNativeLayoutModuleInfo.Handle);
var readyToRunInfoParser = new NativeParser(readyToRunReader, r2rNativeLayoutInfoToken);
// A null readyToRunInfoParser is a valid situation to end up in
// This can happen if either we have exact code for a method, or if
// we are going to use the universal generic implementation.
// In both of those cases, we do not have any generic dictionary cells
// to put into the dictionary
if (!readyToRunInfoParser.IsNull)
{
NativeFormatMetadataUnit nativeMetadataUnit = method.Context.ResolveMetadataUnit(r2rNativeLayoutModuleInfo);
FixupCellMetadataResolver resolver = new FixupCellMetadataResolver(nativeMetadataUnit, nonTemplateMethod);
cells = GenericDictionaryCell.BuildDictionaryFromMetadataTokensAndContext(this, readyToRunInfoParser, nativeMetadataUnit, resolver);
}
}
return cells;
#else
return null;
#endif
}
internal void ParseNativeLayoutInfo(InstantiatedMethod method)
{
TypeLoaderLogger.WriteLine("Parsing NativeLayoutInfo for method " + method.ToString() + " ...");
Debug.Assert(method.Dictionary == null);
InstantiatedMethod nonTemplateMethod = method;
// Templates are always non-unboxing stubs
if (method.UnboxingStub)
{
// Strip unboxing stub, note the first parameter which is false
nonTemplateMethod = (InstantiatedMethod)method.Context.ResolveGenericMethodInstantiation(false, (DefType)method.OwningType, method.NameAndSignature, method.Instantiation, IntPtr.Zero, false);
}
uint nativeLayoutInfoToken;
NativeFormatModuleInfo nativeLayoutModule;
MethodDesc templateMethod = (new TemplateLocator()).TryGetGenericMethodTemplate(nonTemplateMethod, out nativeLayoutModule, out nativeLayoutInfoToken);
// If the templateMethod found in the static image is missing or universal, see if the R2R layout
// can provide something more specific.
if ((templateMethod == null) || templateMethod.IsCanonicalMethod(CanonicalFormKind.Universal))
{
GenericDictionaryCell[] cells = GetGenericMethodDictionaryCellsForMetadataBasedLoad(method, nonTemplateMethod);
if (cells != null)
{
method.SetGenericDictionary(new GenericMethodDictionary(cells));
return;
}
if (templateMethod == null)
{
#if SUPPORTS_NATIVE_METADATA_TYPE_LOADING
// In this case we were looking for the r2r template to create the dictionary, but
// there isn't one. This implies that we don't need a Canon specific dictionary
// so just generate something empty
method.SetGenericDictionary(new GenericMethodDictionary(Array.Empty<GenericDictionaryCell>()));
return;
#else
throw new TypeBuilder.MissingTemplateException();
#endif
}
}
// Ensure that if this method is non-shareable from a normal canonical perspective, then
// its template MUST be a universal canonical template method
Debug.Assert(!method.IsNonSharableMethod || (method.IsNonSharableMethod && templateMethod.IsCanonicalMethod(CanonicalFormKind.Universal)));
NativeReader nativeLayoutInfoReader = TypeLoaderEnvironment.Instance.GetNativeLayoutInfoReader(nativeLayoutModule.Handle);
var methodInfoParser = new NativeParser(nativeLayoutInfoReader, nativeLayoutInfoToken);
var context = new NativeLayoutInfoLoadContext
{
_typeSystemContext = method.Context,
_typeArgumentHandles = method.OwningType.Instantiation,
_methodArgumentHandles = method.Instantiation,
_module = nativeLayoutModule
};
BagElementKind kind;
while ((kind = methodInfoParser.GetBagElementKind()) != BagElementKind.End)
{
switch (kind)
{
case BagElementKind.DictionaryLayout:
TypeLoaderLogger.WriteLine("Found BagElementKind.DictionaryLayout");
method.SetGenericDictionary(new GenericMethodDictionary(GenericDictionaryCell.BuildDictionary(this, context, methodInfoParser.GetParserFromRelativeOffset())));
break;
default:
Debug.Fail("Unexpected BagElementKind for generic method with name " + method.NameAndSignature.Name + "! Only BagElementKind.DictionaryLayout should appear.");
throw new BadImageFormatException();
}
}
if (method.Dictionary == null)
method.SetGenericDictionary(new GenericMethodDictionary(Array.Empty<GenericDictionaryCell>()));
}
internal void ParseNativeLayoutInfo(TypeBuilderState state, TypeDesc type)
{
TypeLoaderLogger.WriteLine("Parsing NativeLayoutInfo for type " + type.ToString() + " ...");
bool isTemplateUniversalCanon = false;
if (state.TemplateType != null)
{
isTemplateUniversalCanon = state.TemplateType.IsCanonicalSubtype(CanonicalFormKind.Universal);
}
// If we found the universal template, see if there is a ReadyToRun dictionary description available.
// If so, use that, otherwise, run down the template type loader path with the universal template
if ((state.TemplateType == null) || isTemplateUniversalCanon)
{
// ReadyToRun case - Native Layout is just the dictionary
NativeParser readyToRunInfoParser = state.GetParserForReadyToRunNativeLayoutInfo();
GenericDictionaryCell[] cells = null;
// A null readyToRunInfoParser is a valid situation to end up in
// This can happen if either we have exact code for the method on a type, or if
// we are going to use the universal generic implementation.
// In both of those cases, we do not have any generic dictionary cells
// to put into the dictionary
if (!readyToRunInfoParser.IsNull)
{
#if SUPPORTS_NATIVE_METADATA_TYPE_LOADING
NativeFormatMetadataUnit nativeMetadataUnit = type.Context.ResolveMetadataUnit(state.R2RNativeLayoutInfo.Module);
FixupCellMetadataResolver resolver = new FixupCellMetadataResolver(nativeMetadataUnit, type);
cells = GenericDictionaryCell.BuildDictionaryFromMetadataTokensAndContext(this, readyToRunInfoParser, nativeMetadataUnit, resolver);
#endif
}
state.Dictionary = cells != null ? new GenericTypeDictionary(cells) : null;
if (state.TemplateType == null)
return;
}
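// From this point on we have a template type: walk the template's native layout "bag" and record the
// elements we understand (base type, static data sizes, GC static descriptors, dictionary layout,
// etc.); unknown element kinds are logged and skipped.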
NativeParser typeInfoParser = state.GetParserForNativeLayoutInfo();
NativeLayoutInfoLoadContext context = state.NativeLayoutInfo.LoadContext;
NativeParser baseTypeParser = new NativeParser();
int nonGcDataSize = 0;
int gcDataSize = 0;
int threadDataSize = 0;
bool staticSizesMeaningful = (type is DefType) // Is type permitted to have static fields
&& !isTemplateUniversalCanon; // Non-universal templates always specify their statics sizes
// if the size can be greater than 0
int baseTypeSize = 0;
bool checkBaseTypeSize = false;
BagElementKind kind;
while ((kind = typeInfoParser.GetBagElementKind()) != BagElementKind.End)
{
switch (kind)
{
case BagElementKind.BaseType:
TypeLoaderLogger.WriteLine("Found BagElementKind.BaseType");
Debug.Assert(baseTypeParser.IsNull);
baseTypeParser = typeInfoParser.GetParserFromRelativeOffset();
break;
case BagElementKind.BaseTypeSize:
TypeLoaderLogger.WriteLine("Found BagElementKind.BaseTypeSize");
Debug.Assert(state.TemplateType.IsCanonicalSubtype(CanonicalFormKind.Universal));
baseTypeSize = checked((int)typeInfoParser.GetUnsigned());
break;
case BagElementKind.ImplementedInterfaces:
TypeLoaderLogger.WriteLine("Found BagElementKind.ImplementedInterfaces");
// Interface handling is done entirely in NativeLayoutInterfacesAlgorithm
typeInfoParser.GetUnsigned();
break;
case BagElementKind.TypeFlags:
{
TypeLoaderLogger.WriteLine("Found BagElementKind.TypeFlags");
Internal.NativeFormat.TypeFlags flags = (Internal.NativeFormat.TypeFlags)typeInfoParser.GetUnsigned();
Debug.Assert(state.HasStaticConstructor == ((flags & Internal.NativeFormat.TypeFlags.HasClassConstructor) != 0));
}
break;
case BagElementKind.ClassConstructorPointer:
TypeLoaderLogger.WriteLine("Found BagElementKind.ClassConstructorPointer");
state.ClassConstructorPointer = context.GetGCStaticInfo(typeInfoParser.GetUnsigned());
break;
case BagElementKind.NonGcStaticDataSize:
TypeLoaderLogger.WriteLine("Found BagElementKind.NonGcStaticDataSize");
// Use checked typecast to int to ensure there aren't any overflows/truncations (size value used in allocation of memory later)
nonGcDataSize = checked((int)typeInfoParser.GetUnsigned());
Debug.Assert(staticSizesMeaningful);
break;
case BagElementKind.GcStaticDataSize:
TypeLoaderLogger.WriteLine("Found BagElementKind.GcStaticDataSize");
// Use checked typecast to int to ensure there aren't any overflows/truncations (size value used in allocation of memory later)
gcDataSize = checked((int)typeInfoParser.GetUnsigned());
Debug.Assert(staticSizesMeaningful);
break;
case BagElementKind.ThreadStaticDataSize:
TypeLoaderLogger.WriteLine("Found BagElementKind.ThreadStaticDataSize");
// Use checked typecast to int to ensure there aren't any overflows/truncations (size value used in allocation of memory later)
threadDataSize = checked((int)typeInfoParser.GetUnsigned());
Debug.Assert(staticSizesMeaningful);
break;
case BagElementKind.GcStaticDesc:
TypeLoaderLogger.WriteLine("Found BagElementKind.GcStaticDesc");
state.GcStaticDesc = context.GetGCStaticInfo(typeInfoParser.GetUnsigned());
break;
case BagElementKind.ThreadStaticDesc:
TypeLoaderLogger.WriteLine("Found BagElementKind.ThreadStaticDesc");
state.ThreadStaticDesc = context.GetGCStaticInfo(typeInfoParser.GetUnsigned());
break;
case BagElementKind.GenericVarianceInfo:
TypeLoaderLogger.WriteLine("Found BagElementKind.GenericVarianceInfo");
NativeParser varianceInfoParser = typeInfoParser.GetParserFromRelativeOffset();
state.GenericVarianceFlags = new GenericVariance[varianceInfoParser.GetSequenceCount()];
for (int i = 0; i < state.GenericVarianceFlags.Length; i++)
state.GenericVarianceFlags[i] = checked((GenericVariance)varianceInfoParser.GetUnsigned());
break;
case BagElementKind.FieldLayout:
TypeLoaderLogger.WriteLine("Found BagElementKind.FieldLayout");
typeInfoParser.SkipInteger(); // Handled in type layout algorithm
break;
#if FEATURE_UNIVERSAL_GENERICS
case BagElementKind.VTableMethodSignatures:
TypeLoaderLogger.WriteLine("Found BagElementKind.VTableMethodSignatures");
ParseVTableMethodSignatures(state, context, typeInfoParser.GetParserFromRelativeOffset());
break;
#endif
case BagElementKind.SealedVTableEntries:
TypeLoaderLogger.WriteLine("Found BagElementKind.SealedVTableEntries");
state.NumSealedVTableEntries = typeInfoParser.GetUnsigned();
break;
case BagElementKind.DictionaryLayout:
TypeLoaderLogger.WriteLine("Found BagElementKind.DictionaryLayout");
Debug.Assert(!isTemplateUniversalCanon, "Universal template nativelayout do not have DictionaryLayout");
Debug.Assert(state.Dictionary == null);
if (!state.TemplateType.RetrieveRuntimeTypeHandleIfPossible())
{
TypeLoaderLogger.WriteLine("ERROR: failed to get type handle for template type " + state.TemplateType.ToString());
throw new TypeBuilder.MissingTemplateException();
}
state.Dictionary = new GenericTypeDictionary(GenericDictionaryCell.BuildDictionary(this, context, typeInfoParser.GetParserFromRelativeOffset()));
break;
default:
TypeLoaderLogger.WriteLine("Found unknown BagElementKind: " + ((int)kind).LowLevelToString());
typeInfoParser.SkipInteger();
break;
}
}
if (staticSizesMeaningful)
{
Debug.Assert((state.NonGcDataSize + (state.HasStaticConstructor ? TypeBuilder.ClassConstructorOffset : 0)) == nonGcDataSize);
Debug.Assert(state.GcDataSize == gcDataSize);
Debug.Assert(state.ThreadDataSize == threadDataSize);
}
#if GENERICS_FORCE_USG
if (isTemplateUniversalCanon && type.CanShareNormalGenericCode())
{
// Even in the GENERICS_FORCE_USG stress mode today, codegen will generate calls to normal-canonical target methods whenever possible.
// Given that we use universal template types to build the dynamic EETypes, these dynamic types will end up with NULL dictionary
// entries, causing the normal-canonical code sharing to fail.
// To fix this problem, we will load the generic dictionary from the non-universal template type, and build a generic dictionary out of
// it for the dynamic type, and store that dictionary pointer in the dynamic MethodTable's structure.
TypeBuilderState tempState = new TypeBuilderState();
tempState.NativeLayoutInfo = new NativeLayoutInfo();
state.NonUniversalTemplateType = tempState.TemplateType = type.Context.TemplateLookup.TryGetNonUniversalTypeTemplate(type, ref tempState.NativeLayoutInfo);
if (tempState.TemplateType != null)
{
Debug.Assert(!tempState.TemplateType.IsCanonicalSubtype(CanonicalFormKind.UniversalCanonLookup));
NativeParser nonUniversalTypeInfoParser = GetNativeLayoutInfoParser(type, ref tempState.NativeLayoutInfo);
NativeParser dictionaryLayoutParser = nonUniversalTypeInfoParser.GetParserForBagElementKind(BagElementKind.DictionaryLayout);
if (!dictionaryLayoutParser.IsNull)
state.Dictionary = new GenericTypeDictionary(GenericDictionaryCell.BuildDictionary(this, context, dictionaryLayoutParser));
// Get the non-universal GCDesc pointers, so we can compare them the ones we will dynamically construct for the type
// and verify they are equal (This is an easy and predictable way of validation for the GCDescs creation logic in the stress mode)
GetNonUniversalGCDescPointers(type, state, tempState);
}
}
#endif
type.ParseBaseType(context, baseTypeParser);
// Assert that parsed base type size matches the BaseTypeSize that we calculated.
Debug.Assert(!checkBaseTypeSize || state.BaseTypeSize == baseTypeSize);
}
#if FEATURE_UNIVERSAL_GENERICS
private void ParseVTableMethodSignatures(TypeBuilderState state, NativeLayoutInfoLoadContext nativeLayoutInfoLoadContext, NativeParser methodSignaturesParser)
{
TypeDesc type = state.TypeBeingBuilt;
if (methodSignaturesParser.IsNull)
return;
// Processing vtable method signatures is only meaningful in the context of universal generics
Debug.Assert(state.TemplateType != null && state.TemplateType.IsCanonicalSubtype(CanonicalFormKind.Universal));
uint numSignatures = methodSignaturesParser.GetUnsigned();
state.VTableMethodSignatures = new TypeBuilderState.VTableLayoutInfo[numSignatures];
for (int i = 0; i < numSignatures; i++)
{
state.VTableMethodSignatures[i] = new TypeBuilderState.VTableLayoutInfo();
uint slot = methodSignaturesParser.GetUnsigned();
state.VTableMethodSignatures[i].VTableSlot = (slot >> 1);
if ((slot & 1) == 1)
{
state.VTableMethodSignatures[i].IsSealedVTableSlot = true;
state.NumSealedVTableMethodSignatures++;
}
NativeParser sigParser = methodSignaturesParser.GetParserFromRelativeOffset();
state.VTableMethodSignatures[i].MethodSignature = RuntimeSignature.CreateFromNativeLayoutSignature(nativeLayoutInfoLoadContext._module.Handle, sigParser.Offset);
}
}
#endif
private unsafe void ComputeVTableLayout(TypeDesc currentType, TypeDesc currentTemplateType, TypeBuilderState targetTypeState)
{
TypeDesc baseType = GetBaseTypeThatIsCorrectForMDArrays(currentType);
TypeDesc baseTemplateType = GetBaseTypeUsingRuntimeTypeHandle(currentTemplateType);
Debug.Assert((baseType == null && baseTemplateType == null) || (baseType != null && baseTemplateType != null));
// Compute the vtable layout for the current type starting with base types first
if (baseType != null)
ComputeVTableLayout(baseType, baseTemplateType, targetTypeState);
currentTemplateType.RetrieveRuntimeTypeHandleIfPossible();
Debug.Assert(!currentTemplateType.RuntimeTypeHandle.IsNull());
Debug.Assert(baseTemplateType == null || !baseTemplateType.RuntimeTypeHandle.IsNull());
// The m_usNumVtableSlots field on EETypes includes the count of vtable slots of the base type,
// so make sure we don't count that twice!
int currentVtableIndex = baseTemplateType == null ? 0 : baseTemplateType.RuntimeTypeHandle.GetNumVtableSlots();
IntPtr dictionarySlotInVtable = IntPtr.Zero;
if (currentType.IsGeneric())
{
if (!currentType.CanShareNormalGenericCode() && currentTemplateType.IsCanonicalSubtype(CanonicalFormKind.Universal))
{
// We are building a type that cannot share code with normal canonical types, so the type has to have
// the same vtable layout as non-shared generics, meaning no dictionary pointer in the vtable.
// We use universal canonical template types to build such types. Universal canonical types have 'NULL'
// dictionary pointers in their vtables, so we'll start copying the vtable entries right after that
// dictionary slot (dictionaries are accessed/used at runtime in a different way, not through the vtable
// dictionary pointer for such types).
currentVtableIndex++;
}
else if (currentType.CanShareNormalGenericCode())
{
// In the case of a normal canonical type in the base class hierarchy,
// we need to keep track of its dictionary slot in the vtable mapping, and try to
// copy its value directly from its template type vtable.
// Two possible cases:
// 1) The template type is a normal canonical type. In this case, the dictionary value
// in the vtable slot of the template is NULL, but that's ok because this case is
// correctly handled anyways by the FinishBaseTypeAndDictionaries() API.
// 2) The template type is NOT a canonical type. In this case, the dictionary value
// in the vtable slot of the template is not null, and we keep track of it in the
// VTableSlotsMapping so we can copy it to the dynamic type after creation.
// This corner case is not handled by FinishBaseTypeAndDictionaries(), so we track it
// here.
// Examples:
// 1) Derived<T,U> : Base<U>, instantiated over [int,string]
// 2) Derived<__Universal> : BaseClass, and BaseClass : BaseBaseClass<object>
// 3) Derived<__Universal> : BaseClass<object>
Debug.Assert(currentTemplateType != null && !currentTemplateType.RuntimeTypeHandle.IsNull());
IntPtr* pTemplateVtable = (IntPtr*)((byte*)(currentTemplateType.RuntimeTypeHandle.ToEETypePtr()) + sizeof(MethodTable));
dictionarySlotInVtable = pTemplateVtable[currentVtableIndex];
}
}
else if (currentType is ArrayType)
{
if (currentTemplateType.IsCanonicalSubtype(CanonicalFormKind.Universal))
{
TypeDesc canonicalElementType = currentType.Context.ConvertToCanon(((ArrayType)currentType).ElementType, CanonicalFormKind.Specific);
bool quickIsNotCanonical = canonicalElementType == ((ArrayType)currentType).ElementType;
Debug.Assert(quickIsNotCanonical == !canonicalElementType.IsCanonicalSubtype(CanonicalFormKind.Any));
if (quickIsNotCanonical)
{
// We are building a type that cannot share code with normal canonical types, so the type has to have
// the same vtable layout as non-shared generics, meaning no dictionary pointer in the vtable.
// We use universal canonical template types to build such types. Universal canonical types have 'NULL'
// dictionary pointers in their vtables, so we'll start copying the vtable entries right after that
// dictionary slot (dictionaries are accessed/used at runtime in a different way, not through the vtable
// dictionary pointer for such types).
currentVtableIndex++;
}
}
}
// Map vtable entries from target type's template type
int numVtableSlotsOnCurrentTemplateType = currentTemplateType.RuntimeTypeHandle.GetNumVtableSlots();
for (; currentVtableIndex < numVtableSlotsOnCurrentTemplateType; currentVtableIndex++)
{
targetTypeState.VTableSlotsMapping.AddMapping(
currentVtableIndex,
targetTypeState.VTableSlotsMapping.NumSlotMappings,
dictionarySlotInVtable);
// Reset dictionarySlotInVtable (only one dictionary slot in vtable per type)
dictionarySlotInVtable = IntPtr.Zero;
}
// Sanity check: vtable of the dynamic type should be equal or smaller than the vtable of the template type
Debug.Assert(targetTypeState.VTableSlotsMapping.NumSlotMappings <= numVtableSlotsOnCurrentTemplateType);
}
/// <summary>
/// Wraps information about how a type is laid out into one package. Types may have been laid out by
/// TypeBuilder (which means they have a gc bitfield), or they could be types that were laid out by NUTC
/// (which means we only have a GCDesc for them). This struct wraps both of those possibilities into
/// one package to be able to write that layout to another bitfield we are constructing. (This is for
/// struct fields.)
/// </summary>
internal unsafe struct GCLayout
{
private LowLevelList<bool> _bitfield;
private unsafe void* _gcdesc;
private int _size;
private bool _isReferenceTypeGCLayout;
public static GCLayout None { get { return new GCLayout(); } }
public static GCLayout SingleReference { get; } = new GCLayout(new LowLevelList<bool>(new bool[1] { true }), false);
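// None represents a layout containing no GC references; SingleReference is a one-element bitfield
// with its bit set, i.e. a single object reference at offset 0.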
public bool IsNone { get { return _bitfield == null && _gcdesc == null; } }
public GCLayout(LowLevelList<bool> bitfield, bool isReferenceTypeGCLayout)
{
Debug.Assert(bitfield != null);
_bitfield = bitfield;
_gcdesc = null;
_size = 0;
_isReferenceTypeGCLayout = isReferenceTypeGCLayout;
}
public GCLayout(RuntimeTypeHandle rtth)
{
MethodTable* MethodTable = rtth.ToEETypePtr();
Debug.Assert(MethodTable != null);
_bitfield = null;
_isReferenceTypeGCLayout = false; // This field is only used for the LowLevelList<bool> path
_gcdesc = MethodTable->HasGCPointers ? (void**)MethodTable - 1 : null;
_size = (int)MethodTable->BaseSize;
}
/// <summary>
/// Writes this layout to the given bitfield.
/// </summary>
/// <param name="bitfield">The bitfield to write a layout to (may be null, at which
/// point it will be created and assigned).</param>
/// <param name="offset">The offset at which we need to write the bitfield.</param>
public void WriteToBitfield(LowLevelList<bool> bitfield, int offset)
{
if (bitfield == null)
throw new ArgumentNullException(nameof(bitfield));
if (IsNone)
return;
// Ensure exactly one of these two are set.
Debug.Assert(_gcdesc != null ^ _bitfield != null);
if (_bitfield != null)
MergeBitfields(bitfield, offset);
else
WriteGCDescToBitfield(bitfield, offset);
}
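// The method below walks a GCDesc stored immediately below the MethodTable: the slot just below the
// MethodTable holds the number of series, followed (moving downward) by per-series offset/size pairs,
// where the stored size is biased by the type's BaseSize (hence _size is added back before converting
// to a pointer count). This summary is only a reading aid; the authoritative encoding is the GC's
// GCDesc format.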
private unsafe void WriteGCDescToBitfield(LowLevelList<bool> bitfield, int offset)
{
int startIndex = offset / IntPtr.Size;
void** ptr = (void**)_gcdesc;
Debug.Assert(_gcdesc != null);
// Number of series
int count = (int)*ptr-- - 1;
Debug.Assert(count >= 0);
// Ensure capacity for the values we are about to write
int capacity = startIndex + _size / IntPtr.Size - 2;
bitfield.Expand(capacity);
while (count-- >= 0)
{
int offs = (int)*ptr-- / IntPtr.Size - 1;
int len = ((int)*ptr-- + _size) / IntPtr.Size;
Debug.Assert(len > 0);
Debug.Assert(offs >= 0);
for (int i = 0; i < len; i++)
bitfield[startIndex + offs + i] = true;
}
}
private void MergeBitfields(LowLevelList<bool> outputBitfield, int offset)
{
int startIndex = offset / IntPtr.Size;
// These routines represent the GC layout after the MethodTable pointer
// in an object, but the LowLevelList<bool> bitfield logically contains
// the EEType pointer if it is describing a reference type. So, skip the
// first value.
int itemsToSkip = _isReferenceTypeGCLayout ? 1 : 0;
// Assert that we only skip a non-reported pointer.
Debug.Assert(itemsToSkip == 0 || _bitfield[0] == false);
// Ensure capacity for the values we are about to write
int capacity = startIndex + _bitfield.Count - itemsToSkip;
outputBitfield.Expand(capacity);
for (int i = itemsToSkip; i < _bitfield.Count; i++)
{
// We should never overwrite a TRUE value in the table.
Debug.Assert(!outputBitfield[startIndex + i - itemsToSkip] || _bitfield[i]);
outputBitfield[startIndex + i - itemsToSkip] = _bitfield[i];
}
}
}
#if GENERICS_FORCE_USG
private unsafe void GetNonUniversalGCDescPointers(TypeDesc type, TypeBuilderState state, TypeBuilderState tempNonUniversalState)
{
NativeParser nonUniversalTypeInfoParser = GetNativeLayoutInfoParser(type, ref tempNonUniversalState.NativeLayoutInfo);
NativeLayoutInfoLoadContext context = tempNonUniversalState.NativeLayoutInfo.LoadContext;
uint beginOffset = nonUniversalTypeInfoParser.Offset;
uint? staticGCDescId = nonUniversalTypeInfoParser.GetUnsignedForBagElementKind(BagElementKind.GcStaticDesc);
nonUniversalTypeInfoParser.Offset = beginOffset;
uint? threadStaticGCDescId = nonUniversalTypeInfoParser.GetUnsignedForBagElementKind(BagElementKind.ThreadStaticDesc);
if(staticGCDescId.HasValue)
state.NonUniversalStaticGCDesc = context.GetStaticInfo(staticGCDescId.Value);
if (threadStaticGCDescId.HasValue)
state.NonUniversalThreadStaticGCDesc = context.GetStaticInfo(threadStaticGCDescId.Value);
state.NonUniversalInstanceGCDescSize = RuntimeAugments.GetGCDescSize(tempNonUniversalState.TemplateType.RuntimeTypeHandle);
if (state.NonUniversalInstanceGCDescSize > 0)
state.NonUniversalInstanceGCDesc = new IntPtr(((byte*)tempNonUniversalState.TemplateType.RuntimeTypeHandle.ToIntPtr().ToPointer()) - 1);
}
#endif
private unsafe void AllocateRuntimeType(TypeDesc type)
{
TypeBuilderState state = type.GetTypeBuilderState();
Debug.Assert(type is DefType || type is ArrayType || type is PointerType || type is ByRefType);
if (state.ThreadDataSize != 0)
state.ThreadStaticOffset = TypeLoaderEnvironment.Instance.GetNextThreadStaticsOffsetValue();
RuntimeTypeHandle rtt = EETypeCreator.CreateEEType(type, state);
if (state.ThreadDataSize != 0)
TypeLoaderEnvironment.Instance.RegisterDynamicThreadStaticsInfo(state.HalfBakedRuntimeTypeHandle, state.ThreadStaticOffset, state.ThreadDataSize);
TypeLoaderLogger.WriteLine("Allocated new type " + type.ToString() + " with hashcode value = 0x" + type.GetHashCode().LowLevelToString() + " with MethodTable = " + rtt.ToIntPtr().LowLevelToString() + " of size " + rtt.ToEETypePtr()->BaseSize.LowLevelToString());
}
private void AllocateRuntimeMethodDictionary(InstantiatedMethod method)
{
Debug.Assert(method.RuntimeMethodDictionary == IntPtr.Zero && method.Dictionary != null);
IntPtr rmd = method.Dictionary.Allocate();
method.AssociateWithRuntimeMethodDictionary(rmd);
TypeLoaderLogger.WriteLine("Allocated new method dictionary for method " + method.ToString() + " @ " + rmd.LowLevelToString());
}
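// Walks up the base type chain to find the type that actually introduced the given vtable slot and
// returns that type's instantiation: a slot inherited from a base type must be interpreted in the
// generic context of the base type that declared it, not of the derived type.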
private RuntimeTypeHandle[] GetGenericContextOfBaseType(DefType type, int vtableMethodSlot)
{
DefType baseType = type.BaseType;
Debug.Assert(baseType == null || !GetRuntimeTypeHandle(baseType).IsNull());
Debug.Assert(vtableMethodSlot < GetRuntimeTypeHandle(type).GetNumVtableSlots());
int numBaseTypeVtableSlots = baseType == null ? 0 : GetRuntimeTypeHandle(baseType).GetNumVtableSlots();
if (vtableMethodSlot < numBaseTypeVtableSlots)
return GetGenericContextOfBaseType(baseType, vtableMethodSlot);
else
return GetRuntimeTypeHandles(type.Instantiation);
}
#if FEATURE_UNIVERSAL_GENERICS
private unsafe void FinishVTableCallingConverterThunks(TypeDesc type, TypeBuilderState state)
{
Debug.Assert(state.TemplateType.IsCanonicalSubtype(CanonicalFormKind.Universal));
if (state.VTableMethodSignatures == null || state.VTableMethodSignatures.Length == 0)
return;
int numVtableSlots = GetRuntimeTypeHandle(type).GetNumVtableSlots();
IntPtr* vtableCells = (IntPtr*)((byte*)GetRuntimeTypeHandle(type).ToIntPtr() + sizeof(MethodTable));
Debug.Assert((state.VTableMethodSignatures.Length - state.NumSealedVTableMethodSignatures) <= numVtableSlots);
TypeDesc baseType = type.BaseType;
int numBaseTypeVtableSlots = GetRuntimeTypeHandle(baseType).GetNumVtableSlots();
// Generic context
RuntimeTypeHandle[] typeArgs = Empty<RuntimeTypeHandle>.Array;
if (type is DefType)
typeArgs = GetRuntimeTypeHandles(((DefType)type).Instantiation);
else if (type is ArrayType)
typeArgs = GetRuntimeTypeHandles(new Instantiation(new TypeDesc[] { ((ArrayType)type).ElementType }));
for (int i = 0; i < state.VTableMethodSignatures.Length; i++)
{
RuntimeTypeHandle[] typeArgsToUse = typeArgs;
int vtableSlotInDynamicType = -1;
if (!state.VTableMethodSignatures[i].IsSealedVTableSlot)
{
vtableSlotInDynamicType = state.VTableSlotsMapping.GetVTableSlotInTargetType((int)state.VTableMethodSignatures[i].VTableSlot);
Debug.Assert(vtableSlotInDynamicType != -1);
if (vtableSlotInDynamicType < numBaseTypeVtableSlots)
{
// Vtable method from the vtable portion of a base type. Use generic context of the basetype defining the vtable slot.
// We should never reach here for array types (the vtable entries of the System.Array basetype should never need a converter).
Debug.Assert(type is DefType);
typeArgsToUse = GetGenericContextOfBaseType((DefType)type, vtableSlotInDynamicType);
}
}
IntPtr originalFunctionPointerFromVTable = state.VTableMethodSignatures[i].IsSealedVTableSlot ?
((IntPtr*)state.HalfBakedSealedVTable)[state.VTableMethodSignatures[i].VTableSlot] :
vtableCells[vtableSlotInDynamicType];
IntPtr thunkPtr = CallConverterThunk.MakeThunk(
ThunkKind.StandardToGeneric,
originalFunctionPointerFromVTable,
state.VTableMethodSignatures[i].MethodSignature,
IntPtr.Zero, // No instantiating arg for non-generic instance methods
typeArgsToUse,
Empty<RuntimeTypeHandle>.Array); // No GVMs in vtables, so no method args
if (state.VTableMethodSignatures[i].IsSealedVTableSlot)
{
// Patch the sealed vtable entry to point to the calling converter thunk
Debug.Assert(state.VTableMethodSignatures[i].VTableSlot < state.NumSealedVTableEntries && state.HalfBakedSealedVTable != IntPtr.Zero);
((IntPtr*)state.HalfBakedSealedVTable)[state.VTableMethodSignatures[i].VTableSlot] = thunkPtr;
}
else
{
// Patch the vtable entry to point to the calling converter thunk
Debug.Assert(vtableSlotInDynamicType < numVtableSlots && vtableCells != null);
vtableCells[vtableSlotInDynamicType] = thunkPtr;
}
}
}
#endif
//
// Returns either the registered type handle or half-baked type handle. This method should be only called
// during final phase of type building.
//
public RuntimeTypeHandle GetRuntimeTypeHandle(TypeDesc type)
{
#if DEBUG
Debug.Assert(_finalTypeBuilding);
#endif
var rtth = type.RuntimeTypeHandle;
if (!rtth.IsNull())
return rtth;
rtth = type.GetTypeBuilderState().HalfBakedRuntimeTypeHandle;
Debug.Assert(!rtth.IsNull());
return rtth;
}
public RuntimeTypeHandle[] GetRuntimeTypeHandles(Instantiation types)
{
if (types.Length == 0)
return Array.Empty<RuntimeTypeHandle>();
RuntimeTypeHandle[] result = new RuntimeTypeHandle[types.Length];
for (int i = 0; i < types.Length; i++)
result[i] = GetRuntimeTypeHandle(types[i]);
return result;
}
public static DefType GetBaseTypeUsingRuntimeTypeHandle(TypeDesc type)
{
type.RetrieveRuntimeTypeHandleIfPossible();
unsafe
{
RuntimeTypeHandle thBaseTypeTemplate = type.RuntimeTypeHandle.ToEETypePtr()->BaseType->ToRuntimeTypeHandle();
if (thBaseTypeTemplate.IsNull())
return null;
return (DefType)type.Context.ResolveRuntimeTypeHandle(thBaseTypeTemplate);
}
}
public static DefType GetBaseTypeThatIsCorrectForMDArrays(TypeDesc type)
{
if (type.BaseType == type.Context.GetWellKnownType(WellKnownType.Array))
{
// Use the type from the template; the metadata we have will be inaccurate for multidimensional
// arrays, as we hide the MDArray infrastructure from the metadata.
TypeDesc template = type.ComputeTemplate(false);
return GetBaseTypeUsingRuntimeTypeHandle(template ?? type);
}
return type.BaseType;
}
private void FinishInterfaces(TypeDesc type, TypeBuilderState state)
{
DefType[] interfaces = state.RuntimeInterfaces;
if (interfaces != null)
{
for (int i = 0; i < interfaces.Length; i++)
{
state.HalfBakedRuntimeTypeHandle.SetInterface(i, GetRuntimeTypeHandle(interfaces[i]));
}
}
}
private unsafe void FinishTypeDictionary(TypeDesc type, TypeBuilderState state)
{
if (state.Dictionary != null)
{
// First, update the dictionary slot in the type's vtable to point to the created dictionary when applicable
Debug.Assert(state.HalfBakedDictionary != IntPtr.Zero);
int dictionarySlot = EETypeCreator.GetDictionarySlotInVTable(type);
if (dictionarySlot >= 0)
{
state.HalfBakedRuntimeTypeHandle.SetDictionary(dictionarySlot, state.HalfBakedDictionary);
}
else
{
// Dictionary shouldn't be in the vtable of the type
Debug.Assert(!type.CanShareNormalGenericCode());
}
TypeLoaderLogger.WriteLine("Setting dictionary entries for type " + type.ToString() + " @ " + state.HalfBakedDictionary.LowLevelToString());
state.Dictionary.Finish(this);
}
}
private unsafe void FinishMethodDictionary(InstantiatedMethod method)
{
Debug.Assert(method.Dictionary != null);
TypeLoaderLogger.WriteLine("Setting dictionary entries for method " + method.ToString() + " @ " + method.RuntimeMethodDictionary.LowLevelToString());
method.Dictionary.Finish(this);
}
private unsafe void FinishClassConstructor(TypeDesc type, TypeBuilderState state)
{
if (!state.HasStaticConstructor)
return;
IntPtr canonicalClassConstructorFunctionPointer = IntPtr.Zero; // Pointer to canonical static method to serve as cctor
IntPtr exactClassConstructorFunctionPointer = IntPtr.Zero; // Exact pointer. Takes priority over canonical pointer
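// The code below either returns early (non-generic ReadyToRun types already have their cctor context
// set up) or resolves one of these two pointers: an exact entry point is written straight into the
// cctor slot, while a canonical entry point must be wrapped in a fat function pointer whose
// instantiation argument is the newly built type (see the FunctionPointerOps.GetGenericMethodFunctionPointer
// call at the end of this method).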
if (state.TemplateType == null)
{
if (!type.HasInstantiation)
{
// Non-Generic ReadyToRun types in their current state already have their static field region setup
// with the class constructor initialized.
return;
}
else
{
// For generic types, we need to do the metadata lookup and then resolve to a function pointer.
MethodDesc staticConstructor = type.GetStaticConstructor();
IntPtr staticCctor;
IntPtr unused1;
TypeLoaderEnvironment.MethodAddressType addressType;
if (!TypeLoaderEnvironment.TryGetMethodAddressFromMethodDesc(staticConstructor, out staticCctor, out unused1, out addressType))
{
Environment.FailFast("Unable to find class constructor method address for type:" + type.ToString());
}
Debug.Assert(unused1 == IntPtr.Zero);
switch (addressType)
{
case TypeLoaderEnvironment.MethodAddressType.Exact:
// If we have an exact match, put it in the slot directly
// and return as we don't want to make this into a fat function pointer
exactClassConstructorFunctionPointer = staticCctor;
break;
case TypeLoaderEnvironment.MethodAddressType.Canonical:
case TypeLoaderEnvironment.MethodAddressType.UniversalCanonical:
// If we have a canonical method, setup for generating a fat function pointer
canonicalClassConstructorFunctionPointer = staticCctor;
break;
default:
Environment.FailFast("Invalid MethodAddressType during ClassConstructor discovery");
return;
}
}
}
else if (state.ClassConstructorPointer.HasValue)
{
canonicalClassConstructorFunctionPointer = state.ClassConstructorPointer.Value;
}
else
{
// Lookup the non-GC static data for the template type, and use the class constructor context offset to locate the class constructor's
// fat pointer within the non-GC static data.
IntPtr templateTypeStaticData = TypeLoaderEnvironment.Instance.TryGetNonGcStaticFieldData(GetRuntimeTypeHandle(state.TemplateType));
Debug.Assert(templateTypeStaticData != IntPtr.Zero);
IntPtr* templateTypeClassConstructorSlotPointer = (IntPtr*)((byte*)templateTypeStaticData + ClassConstructorOffset);
IntPtr templateTypeClassConstructorFatFunctionPointer = *templateTypeClassConstructorSlotPointer;
// Crack the fat function pointer into the raw class constructor method pointer and the generic type dictionary.
Debug.Assert(FunctionPointerOps.IsGenericMethodPointer(templateTypeClassConstructorFatFunctionPointer));
GenericMethodDescriptor* templateTypeGenericMethodDescriptor = FunctionPointerOps.ConvertToGenericDescriptor(templateTypeClassConstructorFatFunctionPointer);
Debug.Assert(templateTypeGenericMethodDescriptor != null);
canonicalClassConstructorFunctionPointer = templateTypeGenericMethodDescriptor->MethodFunctionPointer;
}
IntPtr generatedTypeStaticData = GetRuntimeTypeHandle(type).ToEETypePtr()->DynamicNonGcStaticsData;
IntPtr* generatedTypeClassConstructorSlotPointer = (IntPtr*)((byte*)generatedTypeStaticData + ClassConstructorOffset);
if (exactClassConstructorFunctionPointer != IntPtr.Zero)
{
// We have an exact pointer, not a canonical match
// Just set the pointer and return. No need for a fat pointer
*generatedTypeClassConstructorSlotPointer = exactClassConstructorFunctionPointer;
return;
}
// If we reach here, canonicalClassConstructorFunctionPointer points at a canonical method, which needs to be converted into
// a fat function pointer so that the calli in the ClassConstructorRunner will work properly
Debug.Assert(canonicalClassConstructorFunctionPointer != IntPtr.Zero);
// Use the template type's class constructor method pointer and this type's generic type dictionary to generate a new fat pointer,
// and save that fat pointer back to this type's class constructor context offset within the non-GC static data.
IntPtr instantiationArgument = GetRuntimeTypeHandle(type).ToIntPtr();
IntPtr generatedTypeClassConstructorFatFunctionPointer = FunctionPointerOps.GetGenericMethodFunctionPointer(canonicalClassConstructorFunctionPointer, instantiationArgument);
*generatedTypeClassConstructorSlotPointer = generatedTypeClassConstructorFatFunctionPointer;
}
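/// <summary>
/// If the given base type keeps its generic dictionary in the vtable, copy that dictionary
/// pointer into the corresponding vtable slot of the derived type being built.
/// </summary>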
private void CopyDictionaryFromTypeToAppropriateSlotInDerivedType(TypeDesc baseType, TypeBuilderState derivedTypeState)
{
var baseTypeState = baseType.GetOrCreateTypeBuilderState();
if (baseTypeState.HasDictionaryInVTable)
{
RuntimeTypeHandle baseTypeHandle = GetRuntimeTypeHandle(baseType);
// If the basetype is currently being created by the TypeBuilder, we need to get its dictionary pointer from the
// TypeBuilder state (at this point, the dictionary has not yet been set on the baseTypeHandle). If
// the basetype is not a dynamic type, or has previously been dynamically allocated in the past, the TypeBuilder
// state will have a null dictionary pointer, in which case we need to read it directly from the basetype's vtable
IntPtr dictionaryEntry = baseTypeState.HalfBakedDictionary;
if (dictionaryEntry == IntPtr.Zero)
dictionaryEntry = baseTypeHandle.GetDictionary();
Debug.Assert(dictionaryEntry != IntPtr.Zero);
// Compute the vtable slot for the dictionary entry to set
int dictionarySlot = EETypeCreator.GetDictionarySlotInVTable(baseType);
Debug.Assert(dictionarySlot >= 0);
derivedTypeState.HalfBakedRuntimeTypeHandle.SetDictionary(dictionarySlot, dictionaryEntry);
TypeLoaderLogger.WriteLine("Setting basetype " + baseType.ToString() + " dictionary on type " + derivedTypeState.TypeBeingBuilt.ToString());
}
}
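/// <summary>
/// Set the base type on the half-baked MethodTable and copy the dictionary of every type in
/// the base type hierarchy into the appropriate vtable slot of the type being built.
/// </summary>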
private void FinishBaseTypeAndDictionaries(TypeDesc type, TypeBuilderState state)
{
DefType baseType = GetBaseTypeThatIsCorrectForMDArrays(type);
state.HalfBakedRuntimeTypeHandle.SetBaseType(baseType == null ? default(RuntimeTypeHandle) : GetRuntimeTypeHandle(baseType));
if (baseType == null)
return;
// Update every dictionary in type hierarchy with copy from base type
while (baseType != null)
{
CopyDictionaryFromTypeToAppropriateSlotInDerivedType(baseType, state);
baseType = baseType.BaseType;
}
}
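/// <summary>
/// Fill in the contents of an already allocated MethodTable: generic composition, base type,
/// interfaces, dictionary, class constructor, and the related parameter type for arrays,
/// pointers and byrefs.
/// </summary>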
private void FinishRuntimeType(TypeDesc type)
{
TypeLoaderLogger.WriteLine("Finishing type " + type.ToString() + " ...");
var state = type.GetTypeBuilderState();
if (type is DefType)
{
DefType typeAsDefType = (DefType)type;
if (type.HasInstantiation)
{
// Type definitions don't need any further finishing once created by the EETypeCreator
if (type.IsTypeDefinition)
return;
state.HalfBakedRuntimeTypeHandle.SetGenericDefinition(GetRuntimeTypeHandle(typeAsDefType.GetTypeDefinition()));
Instantiation instantiation = typeAsDefType.Instantiation;
state.HalfBakedRuntimeTypeHandle.SetGenericArity((uint)instantiation.Length);
for (int argIndex = 0; argIndex < instantiation.Length; argIndex++)
{
state.HalfBakedRuntimeTypeHandle.SetGenericArgument(argIndex, GetRuntimeTypeHandle(instantiation[argIndex]));
if (state.GenericVarianceFlags != null)
{
Debug.Assert(state.GenericVarianceFlags.Length == instantiation.Length);
state.HalfBakedRuntimeTypeHandle.SetGenericVariance(argIndex, state.GenericVarianceFlags[argIndex]);
}
}
}
FinishBaseTypeAndDictionaries(type, state);
FinishInterfaces(type, state);
FinishTypeDictionary(type, state);
FinishClassConstructor(type, state);
#if FEATURE_UNIVERSAL_GENERICS
// For types that were allocated from universal canonical templates, patch their vtables with
// pointers to calling convention conversion thunks
if (state.TemplateType != null && state.TemplateType.IsCanonicalSubtype(CanonicalFormKind.Universal))
FinishVTableCallingConverterThunks(type, state);
#endif
}
else if (type is ParameterizedType)
{
if (type is ArrayType)
{
ArrayType typeAsSzArrayType = (ArrayType)type;
state.HalfBakedRuntimeTypeHandle.SetRelatedParameterType(GetRuntimeTypeHandle(typeAsSzArrayType.ElementType));
state.HalfBakedRuntimeTypeHandle.SetComponentSize(state.ComponentSize.Value);
FinishInterfaces(type, state);
if (typeAsSzArrayType.IsSzArray && !typeAsSzArrayType.ElementType.IsPointer)
{
FinishTypeDictionary(type, state);
#if FEATURE_UNIVERSAL_GENERICS
// For types that were allocated from universal canonical templates, patch their vtables with
// pointers to calling convention conversion thunks
if (state.TemplateType != null && state.TemplateType.IsCanonicalSubtype(CanonicalFormKind.Universal))
FinishVTableCallingConverterThunks(type, state);
#endif
}
}
else if (type is PointerType)
{
state.HalfBakedRuntimeTypeHandle.SetRelatedParameterType(GetRuntimeTypeHandle(((PointerType)type).ParameterType));
// Nothing else to do for pointer types
}
else if (type is ByRefType)
{
state.HalfBakedRuntimeTypeHandle.SetRelatedParameterType(GetRuntimeTypeHandle(((ByRefType)type).ParameterType));
// We used a pointer type for the template because they're similar enough. Adjust this to be a ByRef.
unsafe
{
Debug.Assert(state.HalfBakedRuntimeTypeHandle.ToEETypePtr()->ParameterizedTypeShape == ParameterizedTypeShapeConstants.Pointer);
state.HalfBakedRuntimeTypeHandle.SetParameterizedTypeShape(ParameterizedTypeShapeConstants.ByRef);
Debug.Assert(state.HalfBakedRuntimeTypeHandle.ToEETypePtr()->ElementType == EETypeElementType.Pointer);
state.HalfBakedRuntimeTypeHandle.ToEETypePtr()->Flags = EETypeBuilderHelpers.ComputeFlags(type);
Debug.Assert(state.HalfBakedRuntimeTypeHandle.ToEETypePtr()->ElementType == EETypeElementType.ByRef);
}
}
}
else
{
Debug.Assert(false);
}
}
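/// <summary>
/// Enumerate the registration entries for all DefTypes that received new type handles,
/// either as generic instantiations or as metadata-defined types.
/// </summary>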
private IEnumerable<TypeEntryToRegister> TypesToRegister()
{
for (int i = 0; i < _typesThatNeedTypeHandles.Count; i++)
{
DefType typeAsDefType = _typesThatNeedTypeHandles[i] as DefType;
if (typeAsDefType == null)
continue;
if (typeAsDefType.HasInstantiation && !typeAsDefType.IsTypeDefinition)
{
yield return new TypeEntryToRegister
{
GenericTypeEntry = new GenericTypeEntry
{
_genericTypeDefinitionHandle = GetRuntimeTypeHandle(typeAsDefType.GetTypeDefinition()),
_genericTypeArgumentHandles = GetRuntimeTypeHandles(typeAsDefType.Instantiation),
_instantiatedTypeHandle = typeAsDefType.GetTypeBuilderState().HalfBakedRuntimeTypeHandle
}
};
}
else
{
yield return new TypeEntryToRegister
{
MetadataDefinitionType = (MetadataType)typeAsDefType
};
}
}
}
private IEnumerable<GenericMethodEntry> MethodsToRegister()
{
for (int i = 0; i < _methodsThatNeedDictionaries.Count; i++)
{
InstantiatedMethod method = _methodsThatNeedDictionaries[i];
yield return new GenericMethodEntry
{
_declaringTypeHandle = GetRuntimeTypeHandle(method.OwningType),
_genericMethodArgumentHandles = GetRuntimeTypeHandles(method.Instantiation),
_methodNameAndSignature = method.NameAndSignature,
_methodDictionary = method.RuntimeMethodDictionary
};
}
}
private void RegisterGenericTypesAndMethods()
{
int typesToRegisterCount = 0;
for (int i = 0; i < _typesThatNeedTypeHandles.Count; i++)
{
if (_typesThatNeedTypeHandles[i] is DefType)
typesToRegisterCount++;
}
DynamicGenericsRegistrationData registrationData = new DynamicGenericsRegistrationData
{
TypesToRegisterCount = typesToRegisterCount,
TypesToRegister = (typesToRegisterCount != 0) ? TypesToRegister() : null,
MethodsToRegisterCount = _methodsThatNeedDictionaries.Count,
MethodsToRegister = (_methodsThatNeedDictionaries.Count != 0) ? MethodsToRegister() : null,
};
TypeLoaderEnvironment.Instance.RegisterDynamicGenericTypesAndMethods(registrationData);
}
/// <summary>
/// Publish generic type / method information to the data buffer read by the debugger. This supports
/// debugging dynamically created types / methods
/// </summary>
private void RegisterDebugDataForTypesAndMethods()
{
for (int i = 0; i < _typesThatNeedTypeHandles.Count; i++)
{
DefType typeAsDefType;
if ((typeAsDefType = _typesThatNeedTypeHandles[i] as DefType) != null)
{
SerializedDebugData.RegisterDebugDataForType(this, typeAsDefType, typeAsDefType.GetTypeBuilderState());
}
}
for (int i = 0; i < _methodsThatNeedDictionaries.Count; i++)
{
SerializedDebugData.RegisterDebugDataForMethod(this, _methodsThatNeedDictionaries[i]);
}
}
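/// <summary>
/// Allocate and fill in all pending MethodTables and method dictionaries, register them with
/// the runtime, and publish the results to the type system caches.
/// </summary>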
private void FinishTypeAndMethodBuilding()
{
// Once we start allocating EETypes and dictionaries, the only accepted failure is OOM.
// TODO: Error handling - on retry, restart where we failed last time? The current implementation is leaking on OOM.
#if DEBUG
_finalTypeBuilding = true;
#endif
// At this point we know all types that need EETypes. Allocate all EETypes so that we can start building
// their contents.
for (int i = 0; i < _typesThatNeedTypeHandles.Count; i++)
{
AllocateRuntimeType(_typesThatNeedTypeHandles[i]);
}
for (int i = 0; i < _methodsThatNeedDictionaries.Count; i++)
{
AllocateRuntimeMethodDictionary(_methodsThatNeedDictionaries[i]);
}
// Do not add more type phases here. Instead, read the required information from the TypeDesc or TypeBuilderState.
// Fill in content of all EETypes
for (int i = 0; i < _typesThatNeedTypeHandles.Count; i++)
{
FinishRuntimeType(_typesThatNeedTypeHandles[i]);
}
for (int i = 0; i < _methodsThatNeedDictionaries.Count; i++)
{
FinishMethodDictionary(_methodsThatNeedDictionaries[i]);
}
RegisterDebugDataForTypesAndMethods();
int newArrayTypesCount = 0;
int newPointerTypesCount = 0;
int newByRefTypesCount = 0;
int[] mdArrayNewTypesCount = null;
for (int i = 0; i < _typesThatNeedTypeHandles.Count; i++)
{
ParameterizedType typeAsParameterizedType = _typesThatNeedTypeHandles[i] as ParameterizedType;
if (typeAsParameterizedType == null)
continue;
if (typeAsParameterizedType.IsSzArray)
newArrayTypesCount++;
else if (typeAsParameterizedType.IsPointer)
newPointerTypesCount++;
else if (typeAsParameterizedType.IsByRef)
newByRefTypesCount++;
else if (typeAsParameterizedType.IsMdArray)
{
if (mdArrayNewTypesCount == null)
mdArrayNewTypesCount = new int[MDArray.MaxRank + 1];
mdArrayNewTypesCount[((ArrayType)typeAsParameterizedType).Rank]++;
}
}
// Reserve space in the array/pointer caches so that the actual adding can be fault-free.
var szArrayCache = TypeSystemContext.GetArrayTypesCache(false, -1);
szArrayCache.Reserve(szArrayCache.Count + newArrayTypesCount);
if (mdArrayNewTypesCount != null)
{
for (int i = 0; i < mdArrayNewTypesCount.Length; i++)
{
if (mdArrayNewTypesCount[i] == 0)
continue;
var mdArrayCache = TypeSystemContext.GetArrayTypesCache(true, i);
mdArrayCache.Reserve(mdArrayCache.Count + mdArrayNewTypesCount[i]);
}
}
TypeSystemContext.PointerTypesCache.Reserve(TypeSystemContext.PointerTypesCache.Count + newPointerTypesCount);
TypeSystemContext.ByRefTypesCache.Reserve(TypeSystemContext.ByRefTypesCache.Count + newByRefTypesCount);
// Finally, register all generic types and methods atomically with the runtime
RegisterGenericTypesAndMethods();
for (int i = 0; i < _typesThatNeedTypeHandles.Count; i++)
{
_typesThatNeedTypeHandles[i].SetRuntimeTypeHandleUnsafe(_typesThatNeedTypeHandles[i].GetTypeBuilderState().HalfBakedRuntimeTypeHandle);
TypeLoaderLogger.WriteLine("Successfully Registered type " + _typesThatNeedTypeHandles[i].ToString() + ".");
}
// Save all constructed array and pointer types to the types cache
for (int i = 0; i < _typesThatNeedTypeHandles.Count; i++)
{
ParameterizedType typeAsParameterizedType = _typesThatNeedTypeHandles[i] as ParameterizedType;
if (typeAsParameterizedType == null)
continue;
Debug.Assert(!typeAsParameterizedType.RuntimeTypeHandle.IsNull());
Debug.Assert(!typeAsParameterizedType.ParameterType.RuntimeTypeHandle.IsNull());
if (typeAsParameterizedType.IsMdArray)
TypeSystemContext.GetArrayTypesCache(true, ((ArrayType)typeAsParameterizedType).Rank).AddOrGetExisting(typeAsParameterizedType.RuntimeTypeHandle);
else if (typeAsParameterizedType.IsSzArray)
TypeSystemContext.GetArrayTypesCache(false, -1).AddOrGetExisting(typeAsParameterizedType.RuntimeTypeHandle);
else if (typeAsParameterizedType.IsByRef)
{
unsafe
{
Debug.Assert(typeAsParameterizedType.RuntimeTypeHandle.ToEETypePtr()->IsByRefType);
}
TypeSystemContext.ByRefTypesCache.AddOrGetExisting(typeAsParameterizedType.RuntimeTypeHandle);
}
else
{
Debug.Assert(typeAsParameterizedType is PointerType);
unsafe
{
Debug.Assert(typeAsParameterizedType.RuntimeTypeHandle.ToEETypePtr()->IsPointerType);
}
TypeSystemContext.PointerTypesCache.AddOrGetExisting(typeAsParameterizedType.RuntimeTypeHandle);
}
}
}
internal void BuildType(TypeDesc type)
{
TypeLoaderLogger.WriteLine("Dynamically allocating new type for " + type.ToString());
// Construct a new type along with all the dependencies that are needed to create interface lists,
// generic dictionaries, etc.
// Start by collecting all dependencies we need to create in order to create this type.
PrepareType(type);
// Process the pending types
ProcessTypesNeedingPreparation();
FinishTypeAndMethodBuilding();
}
internal bool TryComputeFieldOffset(DefType declaringType, uint fieldOrdinal, out int fieldOffset)
{
TypeLoaderLogger.WriteLine("Computing offset of field #" + fieldOrdinal.LowLevelToString() + " on type " + declaringType.ToString());
// Get the computed field offset result
LayoutInt layoutFieldOffset = declaringType.GetFieldByNativeLayoutOrdinal(fieldOrdinal).Offset;
if (layoutFieldOffset.IsIndeterminate)
{
fieldOffset = 0;
return false;
}
fieldOffset = layoutFieldOffset.AsInt;
return true;
}
private void BuildMethod(InstantiatedMethod method)
{
TypeLoaderLogger.WriteLine("Dynamically allocating new method instantiation for " + method.ToString());
// Start by collecting all dependencies we need to create in order to create this method.
PrepareMethod(method);
// Process the pending types
ProcessTypesNeedingPreparation();
FinishTypeAndMethodBuilding();
}
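/// <summary>
/// Walk the base type hierarchy of srcDefType to find the instantiated type that shares its
/// type definition with dstDefType.
/// </summary>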
private static DefType GetExactDeclaringType(DefType srcDefType, DefType dstDefType)
{
while (srcDefType != null)
{
if (srcDefType.HasSameTypeDefinition(dstDefType))
return srcDefType;
srcDefType = srcDefType.BaseType;
}
Debug.Assert(false);
return null;
}
//
// This method is used by the lazy generic lookup. It resolves the signature of the runtime artifact in the given instantiation context.
//
private unsafe IntPtr BuildGenericLookupTarget(TypeSystemContext typeSystemContext, IntPtr context, IntPtr signature, out IntPtr auxResult)
{
TypeLoaderLogger.WriteLine("BuildGenericLookupTarget for " + context.LowLevelToString() + "/" + signature.LowLevelToString());
TypeManagerHandle typeManager;
NativeReader reader;
uint offset;
// The first is a pointer that points to the TypeManager indirection cell.
// The second is the offset into the native layout info blob in that TypeManager, where the native signature is encoded.
IntPtr** lazySignature = (IntPtr**)signature.ToPointer();
typeManager = new TypeManagerHandle(lazySignature[0][0]);
offset = checked((uint)new IntPtr(lazySignature[1]).ToInt32());
reader = TypeLoaderEnvironment.Instance.GetNativeLayoutInfoReader(typeManager);
NativeParser parser = new NativeParser(reader, offset);
GenericContextKind contextKind = (GenericContextKind)parser.GetUnsigned();
NativeFormatModuleInfo moduleInfo = ModuleList.Instance.GetModuleInfoByHandle(typeManager);
NativeLayoutInfoLoadContext nlilContext = new NativeLayoutInfoLoadContext();
nlilContext._module = moduleInfo;
nlilContext._typeSystemContext = typeSystemContext;
#if SUPPORTS_NATIVE_METADATA_TYPE_LOADING
NativeFormatMetadataUnit metadataUnit = null;
if (moduleInfo.ModuleType == ModuleType.ReadyToRun)
metadataUnit = typeSystemContext.ResolveMetadataUnit(moduleInfo);
#endif
if ((contextKind & GenericContextKind.FromMethodHiddenArg) != 0)
{
RuntimeTypeHandle declaringTypeHandle;
MethodNameAndSignature nameAndSignature;
RuntimeTypeHandle[] genericMethodArgHandles;
bool success = TypeLoaderEnvironment.Instance.TryGetGenericMethodComponents(context, out declaringTypeHandle, out nameAndSignature, out genericMethodArgHandles);
Debug.Assert(success);
if (RuntimeAugments.IsGenericType(declaringTypeHandle))
{
DefType declaringType = (DefType)typeSystemContext.ResolveRuntimeTypeHandle(declaringTypeHandle);
nlilContext._typeArgumentHandles = declaringType.Instantiation;
}
nlilContext._methodArgumentHandles = typeSystemContext.ResolveRuntimeTypeHandles(genericMethodArgHandles);
}
else
{
TypeDesc typeContext = typeSystemContext.ResolveRuntimeTypeHandle(RuntimeAugments.CreateRuntimeTypeHandle(context));
if (typeContext is DefType)
{
nlilContext._typeArgumentHandles = ((DefType)typeContext).Instantiation;
}
else if (typeContext is ArrayType)
{
nlilContext._typeArgumentHandles = new Instantiation(new TypeDesc[] { ((ArrayType)typeContext).ElementType });
}
else
{
Debug.Assert(false);
}
if ((contextKind & GenericContextKind.HasDeclaringType) != 0)
{
// No need to deal with arrays - arrays can't have a declaring type
TypeDesc declaringType;
if (moduleInfo.ModuleType == ModuleType.Eager)
{
declaringType = nlilContext.GetType(ref parser);
}
else
{
Debug.Assert(moduleInfo.ModuleType == ModuleType.ReadyToRun);
#if SUPPORTS_NATIVE_METADATA_TYPE_LOADING
uint typeToken = parser.GetUnsigned();
declaringType = metadataUnit.GetType(((int)typeToken).AsHandle());
#else
Environment.FailFast("Ready to Run module type?");
declaringType = null;
#endif
}
DefType actualContext = GetExactDeclaringType((DefType)typeContext, (DefType)declaringType);
nlilContext._typeArgumentHandles = actualContext.Instantiation;
}
}
if ((contextKind & GenericContextKind.NeedsUSGContext) != 0)
{
IntPtr genericDictionary;
auxResult = IntPtr.Zero;
// There is a cache in place so that this function doesn't get called much, but we still need a registration store,
// so we don't leak allocated contexts
if (TypeLoaderEnvironment.Instance.TryLookupConstructedLazyDictionaryForContext(context, signature, out genericDictionary))
{
return genericDictionary;
}
GenericTypeDictionary ucgDict;
if (moduleInfo.ModuleType == ModuleType.Eager)
{
ucgDict = new GenericTypeDictionary(GenericDictionaryCell.BuildDictionary(this, nlilContext, parser));
}
else
{
#if SUPPORTS_NATIVE_METADATA_TYPE_LOADING
Debug.Assert(moduleInfo.ModuleType == ModuleType.ReadyToRun);
FixupCellMetadataResolver metadataResolver = new FixupCellMetadataResolver(metadataUnit, nlilContext);
ucgDict = new GenericTypeDictionary(GenericDictionaryCell.BuildDictionaryFromMetadataTokensAndContext(this, parser, metadataUnit, metadataResolver));
#else
Environment.FailFast("Ready to Run module type?");
ucgDict = null;
#endif
}
genericDictionary = ucgDict.Allocate();
// Process the pending types
ProcessTypesNeedingPreparation();
FinishTypeAndMethodBuilding();
ucgDict.Finish(this);
TypeLoaderEnvironment.Instance.RegisterConstructedLazyDictionaryForContext(context, signature, genericDictionary);
return genericDictionary;
}
else
{
GenericDictionaryCell cell;
if (moduleInfo.ModuleType == ModuleType.Eager)
{
cell = GenericDictionaryCell.ParseAndCreateCell(
nlilContext,
ref parser);
}
else
{
Debug.Assert(moduleInfo.ModuleType == ModuleType.ReadyToRun);
#if SUPPORTS_NATIVE_METADATA_TYPE_LOADING
MetadataFixupKind fixupKind = (MetadataFixupKind)parser.GetUInt8();
Internal.Metadata.NativeFormat.Handle token = parser.GetUnsigned().AsHandle();
Internal.Metadata.NativeFormat.Handle token2 = default(Internal.Metadata.NativeFormat.Handle);
switch (fixupKind)
{
case MetadataFixupKind.GenericConstrainedMethod:
case MetadataFixupKind.NonGenericConstrainedMethod:
case MetadataFixupKind.NonGenericDirectConstrainedMethod:
token2 = parser.GetUnsigned().AsHandle();
break;
}
FixupCellMetadataResolver resolver = new FixupCellMetadataResolver(metadataUnit, nlilContext);
cell = GenericDictionaryCell.CreateCellFromFixupKindAndToken(fixupKind, resolver, token, token2);
#else
Environment.FailFast("Ready to Run module type?");
cell = null;
#endif
}
cell.Prepare(this);
// Process the pending types
ProcessTypesNeedingPreparation();
FinishTypeAndMethodBuilding();
IntPtr dictionaryCell = cell.CreateLazyLookupCell(this, out auxResult);
return dictionaryCell;
}
}
//
// This method is used to build the floating portion of a generic dictionary.
//
private unsafe IntPtr BuildFloatingDictionary(TypeSystemContext typeSystemContext, IntPtr context, bool isTypeContext, IntPtr fixedDictionary, out bool isNewlyAllocatedDictionary)
{
isNewlyAllocatedDictionary = true;
NativeParser nativeLayoutParser;
NativeLayoutInfoLoadContext nlilContext;
if (isTypeContext)
{
TypeDesc typeContext = typeSystemContext.ResolveRuntimeTypeHandle(*(RuntimeTypeHandle*)&context);
TypeLoaderLogger.WriteLine("Building floating dictionary layout for type " + typeContext.ToString() + "...");
// We should only perform updates to floating dictionaries for types that share normal canonical code
Debug.Assert(typeContext.CanShareNormalGenericCode());
// Computing the template will throw if no template is found.
typeContext.ComputeTemplate();
TypeBuilderState state = typeContext.GetOrCreateTypeBuilderState();
nativeLayoutParser = state.GetParserForNativeLayoutInfo();
nlilContext = state.NativeLayoutInfo.LoadContext;
}
else
{
RuntimeTypeHandle declaringTypeHandle;
MethodNameAndSignature nameAndSignature;
RuntimeTypeHandle[] genericMethodArgHandles;
bool success = TypeLoaderEnvironment.Instance.TryGetGenericMethodComponents(context, out declaringTypeHandle, out nameAndSignature, out genericMethodArgHandles);
Debug.Assert(success);
DefType declaringType = (DefType)typeSystemContext.ResolveRuntimeTypeHandle(declaringTypeHandle);
InstantiatedMethod methodContext = (InstantiatedMethod)typeSystemContext.ResolveGenericMethodInstantiation(
false,
declaringType,
nameAndSignature,
typeSystemContext.ResolveRuntimeTypeHandles(genericMethodArgHandles),
IntPtr.Zero,
false);
TypeLoaderLogger.WriteLine("Building floating dictionary layout for method " + methodContext.ToString() + "...");
// We should only perform updates to floating dictionaries for generic methods that share normal canonical code
Debug.Assert(!methodContext.IsNonSharableMethod);
uint nativeLayoutInfoToken;
NativeFormatModuleInfo nativeLayoutModule;
MethodDesc templateMethod = (new TemplateLocator()).TryGetGenericMethodTemplate(methodContext, out nativeLayoutModule, out nativeLayoutInfoToken);
if (templateMethod == null)
throw new TypeBuilder.MissingTemplateException();
NativeReader nativeLayoutInfoReader = TypeLoaderEnvironment.Instance.GetNativeLayoutInfoReader(nativeLayoutModule.Handle);
nativeLayoutParser = new NativeParser(nativeLayoutInfoReader, nativeLayoutInfoToken);
nlilContext = new NativeLayoutInfoLoadContext
{
_typeSystemContext = methodContext.Context,
_typeArgumentHandles = methodContext.OwningType.Instantiation,
_methodArgumentHandles = methodContext.Instantiation,
_module = nativeLayoutModule
};
}
NativeParser dictionaryLayoutParser = nativeLayoutParser.GetParserForBagElementKind(BagElementKind.DictionaryLayout);
if (dictionaryLayoutParser.IsNull)
return IntPtr.Zero;
int floatingVersionCellIndex, floatingVersionInLayout;
GenericDictionaryCell[] floatingCells = GenericDictionaryCell.BuildFloatingDictionary(this, nlilContext, dictionaryLayoutParser, out floatingVersionCellIndex, out floatingVersionInLayout);
if (floatingCells == null)
return IntPtr.Zero;
// If the floating section is already constructed, then return. This means we are beaten by another thread.
if (*((IntPtr*)fixedDictionary) != IntPtr.Zero)
{
isNewlyAllocatedDictionary = false;
return *((IntPtr*)fixedDictionary);
}
GenericTypeDictionary floatingDict = new GenericTypeDictionary(floatingCells);
IntPtr result = floatingDict.Allocate();
ProcessTypesNeedingPreparation();
FinishTypeAndMethodBuilding();
floatingDict.Finish(this);
return result;
}
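/// <summary>
/// Build a runtime type handle for an instantiation of the given generic type definition.
/// Returns false if no template is available for the instantiation.
/// </summary>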
public static bool TryBuildGenericType(RuntimeTypeHandle genericTypeDefinitionHandle, RuntimeTypeHandle[] genericTypeArgumentHandles, out RuntimeTypeHandle runtimeTypeHandle)
{
Debug.Assert(!genericTypeDefinitionHandle.IsNull() && genericTypeArgumentHandles != null && genericTypeArgumentHandles.Length > 0);
try
{
TypeSystemContext context = TypeSystemContextFactory.Create();
DefType genericDef = (DefType)context.ResolveRuntimeTypeHandle(genericTypeDefinitionHandle);
Instantiation genericArgs = context.ResolveRuntimeTypeHandles(genericTypeArgumentHandles);
DefType typeBeingLoaded = context.ResolveGenericInstantiation(genericDef, genericArgs);
new TypeBuilder().BuildType(typeBeingLoaded);
runtimeTypeHandle = typeBeingLoaded.RuntimeTypeHandle;
Debug.Assert(!runtimeTypeHandle.IsNull());
// Recycle the context only if we successfully built the type. The state may be partially initialized otherwise.
TypeSystemContextFactory.Recycle(context);
return true;
}
catch (MissingTemplateException)
{
runtimeTypeHandle = default(RuntimeTypeHandle);
return false;
}
}
public static bool TryBuildArrayType(RuntimeTypeHandle elementTypeHandle, bool isMdArray, int rank, out RuntimeTypeHandle arrayTypeHandle)
{
try
{
TypeSystemContext context = TypeSystemContextFactory.Create();
TypeDesc elementType = context.ResolveRuntimeTypeHandle(elementTypeHandle);
ArrayType arrayType = (ArrayType)context.GetArrayType(elementType, !isMdArray ? -1 : rank);
new TypeBuilder().BuildType(arrayType);
arrayTypeHandle = arrayType.RuntimeTypeHandle;
Debug.Assert(!arrayTypeHandle.IsNull());
// Recycle the context only if we successfully built the type. The state may be partially initialized otherwise.
TypeSystemContextFactory.Recycle(context);
return true;
}
catch (MissingTemplateException)
{
arrayTypeHandle = default(RuntimeTypeHandle);
return false;
}
}
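/// <summary>
/// Get or create the runtime type handle for a pointer type over the given pointee type.
/// </summary>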
public static bool TryBuildPointerType(RuntimeTypeHandle pointeeTypeHandle, out RuntimeTypeHandle pointerTypeHandle)
{
if (!TypeSystemContext.PointerTypesCache.TryGetValue(pointeeTypeHandle, out pointerTypeHandle))
{
TypeSystemContext context = TypeSystemContextFactory.Create();
TypeDesc pointerType = context.GetPointerType(context.ResolveRuntimeTypeHandle(pointeeTypeHandle));
pointerTypeHandle = EETypeCreator.CreatePointerEEType((uint)pointerType.GetHashCode(), pointeeTypeHandle, pointerType);
unsafe
{
Debug.Assert(pointerTypeHandle.ToEETypePtr()->IsPointerType);
}
TypeSystemContext.PointerTypesCache.AddOrGetExisting(pointerTypeHandle);
// Recycle the context only if we successfully built the type. The state may be partially initialized otherwise.
TypeSystemContextFactory.Recycle(context);
}
return true;
}
public static bool TryBuildByRefType(RuntimeTypeHandle pointeeTypeHandle, out RuntimeTypeHandle byRefTypeHandle)
{
if (!TypeSystemContext.ByRefTypesCache.TryGetValue(pointeeTypeHandle, out byRefTypeHandle))
{
TypeSystemContext context = TypeSystemContextFactory.Create();
TypeDesc byRefType = context.GetByRefType(context.ResolveRuntimeTypeHandle(pointeeTypeHandle));
byRefTypeHandle = EETypeCreator.CreateByRefEEType((uint)byRefType.GetHashCode(), pointeeTypeHandle, byRefType);
unsafe
{
Debug.Assert(byRefTypeHandle.ToEETypePtr()->IsByRefType);
}
TypeSystemContext.ByRefTypesCache.AddOrGetExisting(byRefTypeHandle);
// Recycle the context only if we successfully built the type. The state may be partially initialized otherwise.
TypeSystemContextFactory.Recycle(context);
}
return true;
}
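/// <summary>
/// Build the generic dictionary for a generic method instantiation described by its declaring
/// type, generic argument handles, and name/signature.
/// </summary>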
public static bool TryBuildGenericMethod(RuntimeTypeHandle declaringTypeHandle, RuntimeTypeHandle[] genericMethodArgHandles, MethodNameAndSignature methodNameAndSignature, out IntPtr methodDictionary)
{
TypeSystemContext context = TypeSystemContextFactory.Create();
DefType declaringType = (DefType)context.ResolveRuntimeTypeHandle(declaringTypeHandle);
InstantiatedMethod methodBeingLoaded = (InstantiatedMethod)context.ResolveGenericMethodInstantiation(false, declaringType, methodNameAndSignature, context.ResolveRuntimeTypeHandles(genericMethodArgHandles), IntPtr.Zero, false);
bool success = TryBuildGenericMethod(methodBeingLoaded, out methodDictionary);
// Recycle the context only if we successfully built the method. The state may be partially initialized otherwise.
if (success)
TypeSystemContextFactory.Recycle(context);
return success;
}
internal static bool TryBuildGenericMethod(InstantiatedMethod methodBeingLoaded, out IntPtr methodDictionary)
{
try
{
new TypeBuilder().BuildMethod(methodBeingLoaded);
methodDictionary = methodBeingLoaded.RuntimeMethodDictionary;
Debug.Assert(methodDictionary != IntPtr.Zero);
return true;
}
catch (MissingTemplateException)
{
methodDictionary = IntPtr.Zero;
return false;
}
}
private void ResolveSingleCell_Worker(GenericDictionaryCell cell, out IntPtr fixupResolution)
{
cell.Prepare(this);
// Process the pending types
ProcessTypesNeedingPreparation();
FinishTypeAndMethodBuilding();
// At this stage the pointer we need is accessible via a call to Create on the prepared cell
fixupResolution = cell.Create(this);
}
private void ResolveMultipleCells_Worker(GenericDictionaryCell[] cells, out IntPtr[] fixups)
{
foreach (var cell in cells)
{
cell.Prepare(this);
}
// Process the pending types
ProcessTypesNeedingPreparation();
FinishTypeAndMethodBuilding();
// At this stage the pointer we need is accessible via a call to Create on the prepared cell
fixups = new IntPtr[cells.Length];
for (int i = 0; i < fixups.Length; i++)
fixups[i] = cells[i].Create(this);
}
#if SUPPORTS_NATIVE_METADATA_TYPE_LOADING
private void ResolveSingleMetadataFixup(NativeFormatMetadataUnit module, Handle token, MetadataFixupKind fixupKind, out IntPtr fixupResolution)
{
FixupCellMetadataResolver metadata = new FixupCellMetadataResolver(module);
// Allocate a cell object to represent the fixup, and prepare it
GenericDictionaryCell cell = GenericDictionaryCell.CreateCellFromFixupKindAndToken(fixupKind, metadata, token, default(Handle));
ResolveSingleCell_Worker(cell, out fixupResolution);
}
public static bool TryResolveSingleMetadataFixup(NativeFormatModuleInfo module, int metadataToken, MetadataFixupKind fixupKind, out IntPtr fixupResolution)
{
TypeSystemContext context = TypeSystemContextFactory.Create();
NativeFormatMetadataUnit metadataUnit = context.ResolveMetadataUnit(module);
new TypeBuilder().ResolveSingleMetadataFixup(metadataUnit, metadataToken.AsHandle(), fixupKind, out fixupResolution);
TypeSystemContextFactory.Recycle(context);
return true;
}
public static void ResolveSingleTypeDefinition(QTypeDefinition qTypeDefinition, out IntPtr typeHandle)
{
TypeSystemContext context = TypeSystemContextFactory.Create();
TypeDesc type = context.GetTypeDescFromQHandle(qTypeDefinition);
GenericDictionaryCell cell = GenericDictionaryCell.CreateTypeHandleCell(type);
new TypeBuilder().ResolveSingleCell_Worker(cell, out typeHandle);
TypeSystemContextFactory.Recycle(context);
}
#endif
internal static void ResolveSingleCell(GenericDictionaryCell cell, out IntPtr fixupResolution)
{
new TypeBuilder().ResolveSingleCell_Worker(cell, out fixupResolution);
}
public static void ResolveMultipleCells(GenericDictionaryCell [] cells, out IntPtr[] fixups)
{
new TypeBuilder().ResolveMultipleCells_Worker(cells, out fixups);
}
public static IntPtr BuildGenericLookupTarget(IntPtr typeContext, IntPtr signature, out IntPtr auxResult)
{
try
{
TypeSystemContext context = TypeSystemContextFactory.Create();
IntPtr ret = new TypeBuilder().BuildGenericLookupTarget(context, typeContext, signature, out auxResult);
TypeSystemContextFactory.Recycle(context);
return ret;
}
catch (MissingTemplateException e)
{
// This should not ever happen. The static compiler should ensure that the templates are always
// available for types and methods referenced by lazy dictionary lookups
Environment.FailFast("MissingTemplateException thrown during lazy generic lookup", e);
auxResult = IntPtr.Zero;
return IntPtr.Zero;
}
}
public static bool TryGetFieldOffset(RuntimeTypeHandle declaringTypeHandle, uint fieldOrdinal, out int fieldOffset)
{
try
{
TypeSystemContext context = TypeSystemContextFactory.Create();
DefType declaringType = (DefType)context.ResolveRuntimeTypeHandle(declaringTypeHandle);
Debug.Assert(declaringType.HasInstantiation);
bool success = new TypeBuilder().TryComputeFieldOffset(declaringType, fieldOrdinal, out fieldOffset);
TypeSystemContextFactory.Recycle(context);
return success;
}
catch (MissingTemplateException)
{
fieldOffset = int.MinValue;
return false;
}
}
internal static bool TryGetDelegateInvokeMethodSignature(RuntimeTypeHandle delegateTypeHandle, out RuntimeSignature signature)
{
signature = default(RuntimeSignature);
bool success = false;
TypeSystemContext context = TypeSystemContextFactory.Create();
DefType delegateType = (DefType)context.ResolveRuntimeTypeHandle(delegateTypeHandle);
Debug.Assert(delegateType.HasInstantiation);
NativeLayoutInfo universalLayoutInfo;
NativeParser parser = delegateType.GetOrCreateTypeBuilderState().GetParserForUniversalNativeLayoutInfo(out _, out universalLayoutInfo);
if (!parser.IsNull)
{
NativeParser sigParser = parser.GetParserForBagElementKind(BagElementKind.DelegateInvokeSignature);
if (!sigParser.IsNull)
{
signature = RuntimeSignature.CreateFromNativeLayoutSignature(universalLayoutInfo.Module.Handle, sigParser.Offset);
success = true;
}
}
TypeSystemContextFactory.Recycle(context);
return success;
}
//
// This method is used to build the floating portion of a generic dictionary.
//
internal static IntPtr TryBuildFloatingDictionary(IntPtr context, bool isTypeContext, IntPtr fixedDictionary, out bool isNewlyAllocatedDictionary)
{
isNewlyAllocatedDictionary = true;
try
{
TypeSystemContext typeSystemContext = TypeSystemContextFactory.Create();
IntPtr ret = new TypeBuilder().BuildFloatingDictionary(typeSystemContext, context, isTypeContext, fixedDictionary, out isNewlyAllocatedDictionary);
TypeSystemContextFactory.Recycle(typeSystemContext);
return ret;
}
catch (MissingTemplateException e)
{
// This should not ever happen. The static compiler should ensure that the templates are always
// available for types and methods that have floating dictionaries
Environment.FailFast("MissingTemplateException thrown during dictionary update", e);
return IntPtr.Zero;
}
}
}
}
| // Licensed to the .NET Foundation under one or more agreements.
// The .NET Foundation licenses this file to you under the MIT license.
using System;
using System.Collections.Generic;
using System.Diagnostics;
using System.Reflection;
using System.Runtime;
using System.Text;
using System.Reflection.Runtime.General;
using Internal.Runtime.Augments;
using Internal.Runtime.CompilerServices;
using Internal.Metadata.NativeFormat;
using Internal.NativeFormat;
using Internal.TypeSystem;
using Internal.TypeSystem.NativeFormat;
using Internal.TypeSystem.NoMetadata;
namespace Internal.Runtime.TypeLoader
{
using DynamicGenericsRegistrationData = TypeLoaderEnvironment.DynamicGenericsRegistrationData;
using GenericTypeEntry = TypeLoaderEnvironment.GenericTypeEntry;
using TypeEntryToRegister = TypeLoaderEnvironment.TypeEntryToRegister;
using GenericMethodEntry = TypeLoaderEnvironment.GenericMethodEntry;
using HandleBasedGenericTypeLookup = TypeLoaderEnvironment.HandleBasedGenericTypeLookup;
using DefTypeBasedGenericTypeLookup = TypeLoaderEnvironment.DefTypeBasedGenericTypeLookup;
using HandleBasedGenericMethodLookup = TypeLoaderEnvironment.HandleBasedGenericMethodLookup;
using MethodDescBasedGenericMethodLookup = TypeLoaderEnvironment.MethodDescBasedGenericMethodLookup;
using ThunkKind = CallConverterThunk.ThunkKind;
using VTableSlotMapper = TypeBuilderState.VTableSlotMapper;
internal static class LowLevelListExtensions
{
public static void Expand<T>(this LowLevelList<T> list, int count)
{
if (list.Capacity < count)
list.Capacity = count;
while (list.Count < count)
list.Add(default(T));
}
public static bool HasSetBits(this LowLevelList<bool> list)
{
for (int index = 0; index < list.Count; index++)
{
if (list[index])
return true;
}
return false;
}
}
[Flags]
internal enum FieldLoadState
{
None = 0,
Instance = 1,
Statics = 2,
}
public static class TypeBuilderApi
{
public static void ResolveMultipleCells(GenericDictionaryCell [] cells, out IntPtr[] fixups)
{
TypeBuilder.ResolveMultipleCells(cells, out fixups);
}
}
internal class TypeBuilder
{
public TypeBuilder()
{
TypeLoaderEnvironment.Instance.VerifyTypeLoaderLockHeld();
}
private const int MinimumValueTypeSize = 0x1;
/// <summary>
/// The StaticClassConstructionContext for a type is encoded in the negative space
/// of the NonGCStatic fields of a type.
/// </summary>
public static unsafe readonly int ClassConstructorOffset = -sizeof(System.Runtime.CompilerServices.StaticClassConstructionContext);
private LowLevelList<TypeDesc> _typesThatNeedTypeHandles = new LowLevelList<TypeDesc>();
private LowLevelList<InstantiatedMethod> _methodsThatNeedDictionaries = new LowLevelList<InstantiatedMethod>();
private LowLevelList<TypeDesc> _typesThatNeedPreparation;
private object _epoch = new object();
#if DEBUG
private bool _finalTypeBuilding;
#endif
// Helper exception to abort type building if we do not find the generic type template
internal class MissingTemplateException : Exception
{
}
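/// <summary>
/// Returns true when the owning type and every instantiation argument of the method already
/// have runtime type handles.
/// </summary>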
private bool CheckAllHandlesValidForMethod(MethodDesc method)
{
if (!method.OwningType.RetrieveRuntimeTypeHandleIfPossible())
return false;
for (int i = 0; i < method.Instantiation.Length; i++)
if (!method.Instantiation[i].RetrieveRuntimeTypeHandleIfPossible())
return false;
return true;
}
internal bool RetrieveExactFunctionPointerIfPossible(MethodDesc method, out IntPtr result)
{
result = IntPtr.Zero;
if (!method.IsNonSharableMethod || !CheckAllHandlesValidForMethod(method))
return false;
RuntimeTypeHandle[] genMethodArgs = method.Instantiation.Length > 0 ? new RuntimeTypeHandle[method.Instantiation.Length] : Empty<RuntimeTypeHandle>.Array;
for (int i = 0; i < method.Instantiation.Length; i++)
genMethodArgs[i] = method.Instantiation[i].RuntimeTypeHandle;
return TypeLoaderEnvironment.Instance.TryLookupExactMethodPointerForComponents(method.OwningType.RuntimeTypeHandle, method.NameAndSignature, genMethodArgs, out result);
}
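/// <summary>
/// Try to locate an existing generic dictionary for the method and, if found, associate it
/// with the method so no new dictionary needs to be built.
/// </summary>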
internal bool RetrieveMethodDictionaryIfPossible(InstantiatedMethod method)
{
if (method.RuntimeMethodDictionary != IntPtr.Zero)
return true;
bool allHandlesValid = CheckAllHandlesValidForMethod(method);
TypeLoaderLogger.WriteLine("Looking for method dictionary for method " + method.ToString() + " ... " + (allHandlesValid ? "(All type arg handles valid)" : ""));
IntPtr methodDictionary;
if ((allHandlesValid && TypeLoaderEnvironment.Instance.TryLookupGenericMethodDictionaryForComponents(new HandleBasedGenericMethodLookup(method), out methodDictionary)) ||
(!allHandlesValid && TypeLoaderEnvironment.Instance.TryLookupGenericMethodDictionaryForComponents(new MethodDescBasedGenericMethodLookup(method), out methodDictionary)))
{
TypeLoaderLogger.WriteLine("Found DICT = " + methodDictionary.LowLevelToString() + " for method " + method.ToString());
method.AssociateWithRuntimeMethodDictionary(methodDictionary);
return true;
}
return false;
}
/// <summary>
/// Register the type for preparation. The preparation will be done once the current type is prepared.
/// This is the preferred way to get a dependent type prepared because it avoids issues with cycles and recursion.
/// </summary>
public void RegisterForPreparation(TypeDesc type)
{
TypeLoaderLogger.WriteLine("Register for preparation " + type.ToString() + " ...");
// If this type already has a type handle, do nothing and return
if (type.RetrieveRuntimeTypeHandleIfPossible())
return;
var state = type.GetOrCreateTypeBuilderState();
// If this type was already inspected, do nothing and return.
if (state.NeedsTypeHandle)
return;
state.NeedsTypeHandle = true;
if (type.IsCanonicalSubtype(CanonicalFormKind.Any))
return;
if (_typesThatNeedPreparation == null)
_typesThatNeedPreparation = new LowLevelList<TypeDesc>();
_typesThatNeedPreparation.Add(type);
}
/// <summary>
/// Collects all dependencies that need to be created in order to create
/// the method that was passed in.
/// </summary>
public void PrepareMethod(MethodDesc method)
{
TypeLoaderLogger.WriteLine("Preparing method " + method.ToString() + " ...");
RegisterForPreparation(method.OwningType);
if (method.Instantiation.Length == 0)
return;
InstantiatedMethod genericMethod = (InstantiatedMethod)method;
if (RetrieveMethodDictionaryIfPossible(genericMethod))
return;
// If this method was already inspected, do nothing and return
if (genericMethod.NeedsDictionary)
return;
genericMethod.NeedsDictionary = true;
if (genericMethod.IsCanonicalMethod(CanonicalFormKind.Any))
return;
_methodsThatNeedDictionaries.Add(genericMethod);
foreach (var type in genericMethod.Instantiation)
RegisterForPreparation(type);
ParseNativeLayoutInfo(genericMethod);
}
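/// <summary>
/// Record a type that needs a new MethodTable; only DefTypes, arrays, pointers and byrefs
/// receive runtime type handles.
/// </summary>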
private void InsertIntoNeedsTypeHandleList(TypeBuilderState state, TypeDesc type)
{
if ((type is DefType) || (type is ArrayType) || (type is PointerType) || (type is ByRefType))
{
_typesThatNeedTypeHandles.Add(type);
}
}
/// <summary>
/// Collects all dependencies that need to be created in order to create
/// the type that was passed in.
/// </summary>
internal void PrepareType(TypeDesc type)
{
TypeLoaderLogger.WriteLine("Preparing type " + type.ToString() + " ...");
TypeBuilderState state = type.GetTypeBuilderStateIfExist();
bool hasTypeHandle = type.RetrieveRuntimeTypeHandleIfPossible();
// If this type already has a type handle, do nothing and return
if (hasTypeHandle)
return;
if (state == null)
state = type.GetOrCreateTypeBuilderState();
// If this type was already prepared, do nothing unless we are re-preparing it for the purpose of loading the field layout
if (state.HasBeenPrepared)
{
return;
}
state.HasBeenPrepared = true;
state.NeedsTypeHandle = true;
if (!hasTypeHandle)
{
InsertIntoNeedsTypeHandleList(state, type);
}
bool noExtraPreparation = false; // Set this to true for types which don't need other types to be prepared, i.e. GenericTypeDefinitions
if (type is DefType)
{
DefType typeAsDefType = (DefType)type;
if (typeAsDefType.HasInstantiation)
{
if (typeAsDefType.IsTypeDefinition)
{
noExtraPreparation = true;
}
else
{
// This call to ComputeTemplate will find the native layout info for the type, and the template
// For metadata loaded types, a template will not exist, but we may find the NativeLayout describing the generic dictionary
typeAsDefType.ComputeTemplate(state, false);
Debug.Assert(state.TemplateType == null || (state.TemplateType is DefType && !state.TemplateType.RuntimeTypeHandle.IsNull()));
// Collect dependencies
// We need the instantiation arguments to register a generic type
foreach (var instArg in typeAsDefType.Instantiation)
RegisterForPreparation(instArg);
// We need the type definition to register a generic type
if (type.GetTypeDefinition() is MetadataType)
RegisterForPreparation(type.GetTypeDefinition());
ParseNativeLayoutInfo(state, type);
}
}
if (!noExtraPreparation)
state.PrepareStaticGCLayout();
}
else if (type is ParameterizedType)
{
PrepareType(((ParameterizedType)type).ParameterType);
if (type is ArrayType)
{
ArrayType typeAsArrayType = (ArrayType)type;
if (typeAsArrayType.IsSzArray && !typeAsArrayType.ElementType.IsPointer)
{
typeAsArrayType.ComputeTemplate(state);
Debug.Assert(state.TemplateType != null && state.TemplateType is ArrayType && !state.TemplateType.RuntimeTypeHandle.IsNull());
ParseNativeLayoutInfo(state, type);
}
else
{
Debug.Assert(typeAsArrayType.IsMdArray || typeAsArrayType.ElementType.IsPointer);
}
// Assert that non-valuetypes are considered to have pointer size
Debug.Assert(typeAsArrayType.ParameterType.IsValueType || state.ComponentSize == IntPtr.Size);
}
}
else
{
Debug.Assert(false);
}
// Need to prepare the base type first since it is used to compute interfaces
if (!noExtraPreparation)
{
PrepareBaseTypeAndDictionaries(type);
PrepareRuntimeInterfaces(type);
TypeLoaderLogger.WriteLine("Layout for type " + type.ToString() + " complete." +
" IsHFA = " + (state.IsHFA ? "true" : "false") +
" Type size = " + (state.TypeSize.HasValue ? state.TypeSize.Value.LowLevelToString() : "UNDEF") +
" Fields size = " + (state.UnalignedTypeSize.HasValue ? state.UnalignedTypeSize.Value.LowLevelToString() : "UNDEF") +
" Type alignment = " + (state.FieldAlignment.HasValue ? state.FieldAlignment.Value.LowLevelToString() : "UNDEF"));
#if FEATURE_UNIVERSAL_GENERICS
if (state.TemplateType != null && state.TemplateType.IsCanonicalSubtype(CanonicalFormKind.Universal))
{
state.VTableSlotsMapping = new VTableSlotMapper(state.TemplateType.RuntimeTypeHandle.GetNumVtableSlots());
ComputeVTableLayout(type, state.TemplateType, state);
}
#endif
}
}
/// <summary>
/// Recursively triggers preparation for a type's runtime interfaces
/// </summary>
private void PrepareRuntimeInterfaces(TypeDesc type)
{
// Prepare all the interfaces that might be used. (This can be a superset of the
// interfaces explicitly in the NativeLayout.)
foreach (DefType interfaceType in type.RuntimeInterfaces)
{
PrepareType(interfaceType);
}
}
/// <summary>
/// Triggers preparation for a type's base types
/// </summary>
private void PrepareBaseTypeAndDictionaries(TypeDesc type)
{
DefType baseType = type.BaseType;
if (baseType == null)
return;
PrepareType(baseType);
}
private void ProcessTypesNeedingPreparation()
{
// Process the pending types
while (_typesThatNeedPreparation != null)
{
var pendingTypes = _typesThatNeedPreparation;
_typesThatNeedPreparation = null;
for (int i = 0; i < pendingTypes.Count; i++)
PrepareType(pendingTypes[i]);
}
}
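/// <summary>
/// When metadata-based (ReadyToRun) native layout information is available for the method,
/// build the generic dictionary cells from it; otherwise return null.
/// </summary>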
private GenericDictionaryCell[] GetGenericMethodDictionaryCellsForMetadataBasedLoad(InstantiatedMethod method, InstantiatedMethod nonTemplateMethod)
{
#if SUPPORTS_NATIVE_METADATA_TYPE_LOADING
uint r2rNativeLayoutInfoToken;
GenericDictionaryCell[] cells = null;
NativeFormatModuleInfo r2rNativeLayoutModuleInfo;
if ((new TemplateLocator()).TryGetMetadataNativeLayout(nonTemplateMethod, out r2rNativeLayoutModuleInfo, out r2rNativeLayoutInfoToken))
{
// ReadyToRun dictionary parsing
NativeReader readyToRunReader = TypeLoaderEnvironment.Instance.GetNativeLayoutInfoReader(r2rNativeLayoutModuleInfo.Handle);
var readyToRunInfoParser = new NativeParser(readyToRunReader, r2rNativeLayoutInfoToken);
// A null readyToRunInfoParser is a valid situation to end up in
// This can happen if either we have exact code for a method, or if
// we are going to use the universal generic implementation.
// In both of those cases, we do not have any generic dictionary cells
// to put into the dictionary
if (!readyToRunInfoParser.IsNull)
{
NativeFormatMetadataUnit nativeMetadataUnit = method.Context.ResolveMetadataUnit(r2rNativeLayoutModuleInfo);
FixupCellMetadataResolver resolver = new FixupCellMetadataResolver(nativeMetadataUnit, nonTemplateMethod);
cells = GenericDictionaryCell.BuildDictionaryFromMetadataTokensAndContext(this, readyToRunInfoParser, nativeMetadataUnit, resolver);
}
}
return cells;
#else
return null;
#endif
}
internal void ParseNativeLayoutInfo(InstantiatedMethod method)
{
TypeLoaderLogger.WriteLine("Parsing NativeLayoutInfo for method " + method.ToString() + " ...");
Debug.Assert(method.Dictionary == null);
InstantiatedMethod nonTemplateMethod = method;
// Templates are always non-unboxing stubs
if (method.UnboxingStub)
{
// Strip unboxing stub, note the first parameter which is false
nonTemplateMethod = (InstantiatedMethod)method.Context.ResolveGenericMethodInstantiation(false, (DefType)method.OwningType, method.NameAndSignature, method.Instantiation, IntPtr.Zero, false);
}
uint nativeLayoutInfoToken;
NativeFormatModuleInfo nativeLayoutModule;
MethodDesc templateMethod = (new TemplateLocator()).TryGetGenericMethodTemplate(nonTemplateMethod, out nativeLayoutModule, out nativeLayoutInfoToken);
// If the templateMethod found in the static image is missing or universal, see if the R2R layout
// can provide something more specific.
if ((templateMethod == null) || templateMethod.IsCanonicalMethod(CanonicalFormKind.Universal))
{
GenericDictionaryCell[] cells = GetGenericMethodDictionaryCellsForMetadataBasedLoad(method, nonTemplateMethod);
if (cells != null)
{
method.SetGenericDictionary(new GenericMethodDictionary(cells));
return;
}
if (templateMethod == null)
{
#if SUPPORTS_NATIVE_METADATA_TYPE_LOADING
// In this case we were looking for the r2r template to create the dictionary, but
// there isn't one. This implies that we don't need a Canon specific dictionary
// so just generate something empty
method.SetGenericDictionary(new GenericMethodDictionary(Array.Empty<GenericDictionaryCell>()));
return;
#else
throw new TypeBuilder.MissingTemplateException();
#endif
}
}
// Ensure that if this method is non-shareable from a normal canonical perspective, then
// its template MUST be a universal canonical template method
Debug.Assert(!method.IsNonSharableMethod || (method.IsNonSharableMethod && templateMethod.IsCanonicalMethod(CanonicalFormKind.Universal)));
NativeReader nativeLayoutInfoReader = TypeLoaderEnvironment.Instance.GetNativeLayoutInfoReader(nativeLayoutModule.Handle);
var methodInfoParser = new NativeParser(nativeLayoutInfoReader, nativeLayoutInfoToken);
var context = new NativeLayoutInfoLoadContext
{
_typeSystemContext = method.Context,
_typeArgumentHandles = method.OwningType.Instantiation,
_methodArgumentHandles = method.Instantiation,
_module = nativeLayoutModule
};
BagElementKind kind;
while ((kind = methodInfoParser.GetBagElementKind()) != BagElementKind.End)
{
switch (kind)
{
case BagElementKind.DictionaryLayout:
TypeLoaderLogger.WriteLine("Found BagElementKind.DictionaryLayout");
method.SetGenericDictionary(new GenericMethodDictionary(GenericDictionaryCell.BuildDictionary(this, context, methodInfoParser.GetParserFromRelativeOffset())));
break;
default:
Debug.Fail("Unexpected BagElementKind for generic method with name " + method.NameAndSignature.Name + "! Only BagElementKind.DictionaryLayout should appear.");
throw new BadImageFormatException();
}
}
if (method.Dictionary == null)
method.SetGenericDictionary(new GenericMethodDictionary(Array.Empty<GenericDictionaryCell>()));
}
internal void ParseNativeLayoutInfo(TypeBuilderState state, TypeDesc type)
{
TypeLoaderLogger.WriteLine("Parsing NativeLayoutInfo for type " + type.ToString() + " ...");
bool isTemplateUniversalCanon = false;
if (state.TemplateType != null)
{
isTemplateUniversalCanon = state.TemplateType.IsCanonicalSubtype(CanonicalFormKind.Universal);
}
// If we found the universal template, see if there is a ReadyToRun dictionary description available.
// If so, use that, otherwise, run down the template type loader path with the universal template
if ((state.TemplateType == null) || isTemplateUniversalCanon)
{
// ReadyToRun case - Native Layout is just the dictionary
NativeParser readyToRunInfoParser = state.GetParserForReadyToRunNativeLayoutInfo();
GenericDictionaryCell[] cells = null;
// A null readyToRunInfoParser is a valid situation to end up in
// This can happen if either we have exact code for the method on a type, or if
// we are going to use the universal generic implementation.
// In both of those cases, we do not have any generic dictionary cells
// to put into the dictionary
if (!readyToRunInfoParser.IsNull)
{
#if SUPPORTS_NATIVE_METADATA_TYPE_LOADING
NativeFormatMetadataUnit nativeMetadataUnit = type.Context.ResolveMetadataUnit(state.R2RNativeLayoutInfo.Module);
FixupCellMetadataResolver resolver = new FixupCellMetadataResolver(nativeMetadataUnit, type);
cells = GenericDictionaryCell.BuildDictionaryFromMetadataTokensAndContext(this, readyToRunInfoParser, nativeMetadataUnit, resolver);
#endif
}
state.Dictionary = cells != null ? new GenericTypeDictionary(cells) : null;
if (state.TemplateType == null)
return;
}
NativeParser typeInfoParser = state.GetParserForNativeLayoutInfo();
NativeLayoutInfoLoadContext context = state.NativeLayoutInfo.LoadContext;
NativeParser baseTypeParser = new NativeParser();
int nonGcDataSize = 0;
int gcDataSize = 0;
int threadDataSize = 0;
bool staticSizesMeaningful = (type is DefType) // Is type permitted to have static fields
&& !isTemplateUniversalCanon; // Non-universal templates always specify their statics sizes
// if the size can be greater than 0
int baseTypeSize = 0;
bool checkBaseTypeSize = false;
BagElementKind kind;
while ((kind = typeInfoParser.GetBagElementKind()) != BagElementKind.End)
{
switch (kind)
{
case BagElementKind.BaseType:
TypeLoaderLogger.WriteLine("Found BagElementKind.BaseType");
Debug.Assert(baseTypeParser.IsNull);
baseTypeParser = typeInfoParser.GetParserFromRelativeOffset();
break;
case BagElementKind.BaseTypeSize:
TypeLoaderLogger.WriteLine("Found BagElementKind.BaseTypeSize");
Debug.Assert(state.TemplateType.IsCanonicalSubtype(CanonicalFormKind.Universal));
baseTypeSize = checked((int)typeInfoParser.GetUnsigned());
break;
case BagElementKind.ImplementedInterfaces:
TypeLoaderLogger.WriteLine("Found BagElementKind.ImplementedInterfaces");
// Interface handling is done entirely in NativeLayoutInterfacesAlgorithm
typeInfoParser.GetUnsigned();
break;
case BagElementKind.TypeFlags:
{
TypeLoaderLogger.WriteLine("Found BagElementKind.TypeFlags");
Internal.NativeFormat.TypeFlags flags = (Internal.NativeFormat.TypeFlags)typeInfoParser.GetUnsigned();
Debug.Assert(state.HasStaticConstructor == ((flags & Internal.NativeFormat.TypeFlags.HasClassConstructor) != 0));
}
break;
case BagElementKind.ClassConstructorPointer:
TypeLoaderLogger.WriteLine("Found BagElementKind.ClassConstructorPointer");
state.ClassConstructorPointer = context.GetGCStaticInfo(typeInfoParser.GetUnsigned());
break;
case BagElementKind.NonGcStaticDataSize:
TypeLoaderLogger.WriteLine("Found BagElementKind.NonGcStaticDataSize");
// Use checked typecast to int to ensure there aren't any overflows/truncations (size value used in allocation of memory later)
nonGcDataSize = checked((int)typeInfoParser.GetUnsigned());
Debug.Assert(staticSizesMeaningful);
break;
case BagElementKind.GcStaticDataSize:
TypeLoaderLogger.WriteLine("Found BagElementKind.GcStaticDataSize");
// Use checked typecast to int to ensure there aren't any overflows/truncations (size value used in allocation of memory later)
gcDataSize = checked((int)typeInfoParser.GetUnsigned());
Debug.Assert(staticSizesMeaningful);
break;
case BagElementKind.ThreadStaticDataSize:
TypeLoaderLogger.WriteLine("Found BagElementKind.ThreadStaticDataSize");
// Use checked typecast to int to ensure there aren't any overflows/truncations (size value used in allocation of memory later)
threadDataSize = checked((int)typeInfoParser.GetUnsigned());
Debug.Assert(staticSizesMeaningful);
break;
case BagElementKind.GcStaticDesc:
TypeLoaderLogger.WriteLine("Found BagElementKind.GcStaticDesc");
state.GcStaticDesc = context.GetGCStaticInfo(typeInfoParser.GetUnsigned());
break;
case BagElementKind.ThreadStaticDesc:
TypeLoaderLogger.WriteLine("Found BagElementKind.ThreadStaticDesc");
state.ThreadStaticDesc = context.GetGCStaticInfo(typeInfoParser.GetUnsigned());
break;
case BagElementKind.GenericVarianceInfo:
TypeLoaderLogger.WriteLine("Found BagElementKind.GenericVarianceInfo");
NativeParser varianceInfoParser = typeInfoParser.GetParserFromRelativeOffset();
state.GenericVarianceFlags = new GenericVariance[varianceInfoParser.GetSequenceCount()];
for (int i = 0; i < state.GenericVarianceFlags.Length; i++)
state.GenericVarianceFlags[i] = checked((GenericVariance)varianceInfoParser.GetUnsigned());
break;
case BagElementKind.FieldLayout:
TypeLoaderLogger.WriteLine("Found BagElementKind.FieldLayout");
typeInfoParser.SkipInteger(); // Handled in type layout algorithm
break;
#if FEATURE_UNIVERSAL_GENERICS
case BagElementKind.VTableMethodSignatures:
TypeLoaderLogger.WriteLine("Found BagElementKind.VTableMethodSignatures");
ParseVTableMethodSignatures(state, context, typeInfoParser.GetParserFromRelativeOffset());
break;
#endif
case BagElementKind.SealedVTableEntries:
TypeLoaderLogger.WriteLine("Found BagElementKind.SealedVTableEntries");
state.NumSealedVTableEntries = typeInfoParser.GetUnsigned();
break;
case BagElementKind.DictionaryLayout:
TypeLoaderLogger.WriteLine("Found BagElementKind.DictionaryLayout");
Debug.Assert(!isTemplateUniversalCanon, "Universal template native layouts do not have a DictionaryLayout");
Debug.Assert(state.Dictionary == null);
if (!state.TemplateType.RetrieveRuntimeTypeHandleIfPossible())
{
TypeLoaderLogger.WriteLine("ERROR: failed to get type handle for template type " + state.TemplateType.ToString());
throw new TypeBuilder.MissingTemplateException();
}
state.Dictionary = new GenericTypeDictionary(GenericDictionaryCell.BuildDictionary(this, context, typeInfoParser.GetParserFromRelativeOffset()));
break;
default:
TypeLoaderLogger.WriteLine("Found unknown BagElementKind: " + ((int)kind).LowLevelToString());
typeInfoParser.SkipInteger();
break;
}
}
if (staticSizesMeaningful)
{
Debug.Assert((state.NonGcDataSize + (state.HasStaticConstructor ? TypeBuilder.ClassConstructorOffset : 0)) == nonGcDataSize);
Debug.Assert(state.GcDataSize == gcDataSize);
Debug.Assert(state.ThreadDataSize == threadDataSize);
}
#if GENERICS_FORCE_USG
if (isTemplateUniversalCanon && type.CanShareNormalGenericCode())
{
// Even in the GENERICS_FORCE_USG stress mode today, codegen will generate calls to normal-canonical target methods whenever possible.
// Given that we use universal template types to build the dynamic EETypes, these dynamic types will end up with NULL dictionary
// entries, causing the normal-canonical code sharing to fail.
// To fix this problem, we will load the generic dictionary from the non-universal template type, and build a generic dictionary out of
// it for the dynamic type, and store that dictionary pointer in the dynamic MethodTable's structure.
TypeBuilderState tempState = new TypeBuilderState();
tempState.NativeLayoutInfo = new NativeLayoutInfo();
state.NonUniversalTemplateType = tempState.TemplateType = type.Context.TemplateLookup.TryGetNonUniversalTypeTemplate(type, ref tempState.NativeLayoutInfo);
if (tempState.TemplateType != null)
{
Debug.Assert(!tempState.TemplateType.IsCanonicalSubtype(CanonicalFormKind.UniversalCanonLookup));
NativeParser nonUniversalTypeInfoParser = GetNativeLayoutInfoParser(type, ref tempState.NativeLayoutInfo);
NativeParser dictionaryLayoutParser = nonUniversalTypeInfoParser.GetParserForBagElementKind(BagElementKind.DictionaryLayout);
if (!dictionaryLayoutParser.IsNull)
state.Dictionary = new GenericTypeDictionary(GenericDictionaryCell.BuildDictionary(this, context, dictionaryLayoutParser));
// Get the non-universal GCDesc pointers, so we can compare them to the ones we will dynamically construct for the type
// and verify they are equal (this is an easy and predictable way of validating the GCDesc creation logic in the stress mode)
GetNonUniversalGCDescPointers(type, state, tempState);
}
}
#endif
type.ParseBaseType(context, baseTypeParser);
// Assert that parsed base type size matches the BaseTypeSize that we calculated.
Debug.Assert(!checkBaseTypeSize || state.BaseTypeSize == baseTypeSize);
}
#if FEATURE_UNIVERSAL_GENERICS
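// Reads the vtable method signatures recorded for a universal canonical template. These signatures are
// used later (see FinishVTableCallingConverterThunks) to build calling convention conversion thunks for
// the affected vtable slots.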
private void ParseVTableMethodSignatures(TypeBuilderState state, NativeLayoutInfoLoadContext nativeLayoutInfoLoadContext, NativeParser methodSignaturesParser)
{
TypeDesc type = state.TypeBeingBuilt;
if (methodSignaturesParser.IsNull)
return;
// Processing vtable method signatures is only meaningful in the context of universal generics
Debug.Assert(state.TemplateType != null && state.TemplateType.IsCanonicalSubtype(CanonicalFormKind.Universal));
uint numSignatures = methodSignaturesParser.GetUnsigned();
state.VTableMethodSignatures = new TypeBuilderState.VTableLayoutInfo[numSignatures];
for (int i = 0; i < numSignatures; i++)
{
state.VTableMethodSignatures[i] = new TypeBuilderState.VTableLayoutInfo();
uint slot = methodSignaturesParser.GetUnsigned();
state.VTableMethodSignatures[i].VTableSlot = (slot >> 1);
if ((slot & 1) == 1)
{
state.VTableMethodSignatures[i].IsSealedVTableSlot = true;
state.NumSealedVTableMethodSignatures++;
}
NativeParser sigParser = methodSignaturesParser.GetParserFromRelativeOffset();
state.VTableMethodSignatures[i].MethodSignature = RuntimeSignature.CreateFromNativeLayoutSignature(nativeLayoutInfoLoadContext._module.Handle, sigParser.Offset);
}
}
#endif
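// Recursively walks the type hierarchy, base types first, and records where each vtable slot of the
// template type should land in the dynamically built type. Dictionary slots embedded in the vtable are
// tracked so their values can be copied or patched later.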
private unsafe void ComputeVTableLayout(TypeDesc currentType, TypeDesc currentTemplateType, TypeBuilderState targetTypeState)
{
TypeDesc baseType = GetBaseTypeThatIsCorrectForMDArrays(currentType);
TypeDesc baseTemplateType = GetBaseTypeUsingRuntimeTypeHandle(currentTemplateType);
Debug.Assert((baseType == null && baseTemplateType == null) || (baseType != null && baseTemplateType != null));
// Compute the vtable layout for the current type starting with base types first
if (baseType != null)
ComputeVTableLayout(baseType, baseTemplateType, targetTypeState);
currentTemplateType.RetrieveRuntimeTypeHandleIfPossible();
Debug.Assert(!currentTemplateType.RuntimeTypeHandle.IsNull());
Debug.Assert(baseTemplateType == null || !baseTemplateType.RuntimeTypeHandle.IsNull());
// The m_usNumVtableSlots field on EETypes includes the count of vtable slots of the base type,
// so make sure we don't count that twice!
int currentVtableIndex = baseTemplateType == null ? 0 : baseTemplateType.RuntimeTypeHandle.GetNumVtableSlots();
IntPtr dictionarySlotInVtable = IntPtr.Zero;
if (currentType.IsGeneric())
{
if (!currentType.CanShareNormalGenericCode() && currentTemplateType.IsCanonicalSubtype(CanonicalFormKind.Universal))
{
// We are building a type that cannot share code with normal canonical types, so the type has to have
// the same vtable layout as non-shared generics, meaning no dictionary pointer in the vtable.
// We use universal canonical template types to build such types. Universal canonical types have 'NULL'
// dictionary pointers in their vtables, so we'll start copying the vtable entries right after that
// dictionary slot (dictionaries are accessed/used at runtime in a different way, not through the vtable
// dictionary pointer for such types).
currentVtableIndex++;
}
else if (currentType.CanShareNormalGenericCode())
{
// If the current type (which may be a base type of the type being built) can share normal canonical
// code, we need to keep track of its dictionary slot in the vtable mapping, and try to
// copy its value directly from its template type's vtable.
// Two possible cases:
// 1) The template type is a normal canonical type. In this case, the dictionary value
// in the vtable slot of the template is NULL, but that's ok because this case is
// correctly handled anyways by the FinishBaseTypeAndDictionaries() API.
// 2) The template type is NOT a canonical type. In this case, the dictionary value
// in the vtable slot of the template is not null, and we keep track of it in the
// VTableSlotsMapping so we can copy it to the dynamic type after creation.
// This corner case is not handled by FinishBaseTypeAndDictionaries(), so we track it
// here.
// Examples:
// 1) Derived<T,U> : Base<U>, instantiated over [int,string]
// 2) Derived<__Universal> : BaseClass, and BaseClass : BaseBaseClass<object>
// 3) Derived<__Universal> : BaseClass<object>
Debug.Assert(currentTemplateType != null && !currentTemplateType.RuntimeTypeHandle.IsNull());
IntPtr* pTemplateVtable = (IntPtr*)((byte*)(currentTemplateType.RuntimeTypeHandle.ToEETypePtr()) + sizeof(MethodTable));
dictionarySlotInVtable = pTemplateVtable[currentVtableIndex];
}
}
else if (currentType is ArrayType)
{
if (currentTemplateType.IsCanonicalSubtype(CanonicalFormKind.Universal))
{
TypeDesc canonicalElementType = currentType.Context.ConvertToCanon(((ArrayType)currentType).ElementType, CanonicalFormKind.Specific);
bool quickIsNotCanonical = canonicalElementType == ((ArrayType)currentType).ElementType;
Debug.Assert(quickIsNotCanonical == !canonicalElementType.IsCanonicalSubtype(CanonicalFormKind.Any));
if (quickIsNotCanonical)
{
// We are building a type that cannot share code with normal canonical types, so the type has to have
// the same vtable layout as non-shared generics, meaning no dictionary pointer in the vtable.
// We use universal canonical template types to build such types. Universal canonical types have 'NULL'
// dictionary pointers in their vtables, so we'll start copying the vtable entries right after that
// dictionary slot (dictionaries are accessed/used at runtime in a different way, not through the vtable
// dictionary pointer for such types).
currentVtableIndex++;
}
}
}
// Map vtable entries from target type's template type
int numVtableSlotsOnCurrentTemplateType = currentTemplateType.RuntimeTypeHandle.GetNumVtableSlots();
for (; currentVtableIndex < numVtableSlotsOnCurrentTemplateType; currentVtableIndex++)
{
targetTypeState.VTableSlotsMapping.AddMapping(
currentVtableIndex,
targetTypeState.VTableSlotsMapping.NumSlotMappings,
dictionarySlotInVtable);
// Reset dictionarySlotInVtable (only one dictionary slot in vtable per type)
dictionarySlotInVtable = IntPtr.Zero;
}
// Sanity check: vtable of the dynamic type should be equal or smaller than the vtable of the template type
Debug.Assert(targetTypeState.VTableSlotsMapping.NumSlotMappings <= numVtableSlotsOnCurrentTemplateType);
}
/// <summary>
/// Wraps information about how a type is laid out into one package. Types may have been laid out by
/// TypeBuilder (which means they have a gc bitfield), or they could be types that were laid out by NUTC
/// (which means we only have a GCDesc for them). This struct wraps both of those possibilities into
/// one package to be able to write that layout to another bitfield we are constructing. (This is for
/// struct fields.)
/// </summary>
internal unsafe struct GCLayout
{
private LowLevelList<bool> _bitfield;
private unsafe void* _gcdesc;
private int _size;
private bool _isReferenceTypeGCLayout;
public static GCLayout None { get { return new GCLayout(); } }
public static GCLayout SingleReference { get; } = new GCLayout(new LowLevelList<bool>(new bool[1] { true }), false);
public bool IsNone { get { return _bitfield == null && _gcdesc == null; } }
public GCLayout(LowLevelList<bool> bitfield, bool isReferenceTypeGCLayout)
{
Debug.Assert(bitfield != null);
_bitfield = bitfield;
_gcdesc = null;
_size = 0;
_isReferenceTypeGCLayout = isReferenceTypeGCLayout;
}
public GCLayout(RuntimeTypeHandle rtth)
{
MethodTable* MethodTable = rtth.ToEETypePtr();
Debug.Assert(MethodTable != null);
_bitfield = null;
_isReferenceTypeGCLayout = false; // This field is only used for the LowLevelList<bool> path
_gcdesc = MethodTable->HasGCPointers ? (void**)MethodTable - 1 : null;
_size = (int)MethodTable->BaseSize;
}
/// <summary>
/// Writes this layout to the given bitfield.
/// </summary>
/// <param name="bitfield">The bitfield to write the layout to. Must not be null;
/// an ArgumentNullException is thrown otherwise.</param>
/// <param name="offset">The offset at which we need to write the bitfield.</param>
public void WriteToBitfield(LowLevelList<bool> bitfield, int offset)
{
if (bitfield == null)
throw new ArgumentNullException(nameof(bitfield));
if (IsNone)
return;
// Ensure exactly one of these two is set.
Debug.Assert(_gcdesc != null ^ _bitfield != null);
if (_bitfield != null)
MergeBitfields(bitfield, offset);
else
WriteGCDescToBitfield(bitfield, offset);
}
private unsafe void WriteGCDescToBitfield(LowLevelList<bool> bitfield, int offset)
{
int startIndex = offset / IntPtr.Size;
void** ptr = (void**)_gcdesc;
Debug.Assert(_gcdesc != null);
// Number of series
int count = (int)*ptr-- - 1;
Debug.Assert(count >= 0);
// Ensure capacity for the values we are about to write
int capacity = startIndex + _size / IntPtr.Size - 2;
bitfield.Expand(capacity);
while (count-- >= 0)
{
int offs = (int)*ptr-- / IntPtr.Size - 1;
int len = ((int)*ptr-- + _size) / IntPtr.Size;
Debug.Assert(len > 0);
Debug.Assert(offs >= 0);
for (int i = 0; i < len; i++)
bitfield[startIndex + offs + i] = true;
}
}
private void MergeBitfields(LowLevelList<bool> outputBitfield, int offset)
{
int startIndex = offset / IntPtr.Size;
// These routines represent the GC layout after the MethodTable pointer
// in an object, but the LowLevelList<bool> bitfield logically contains
// the MethodTable pointer if it is describing a reference type. So, skip the
// first value.
int itemsToSkip = _isReferenceTypeGCLayout ? 1 : 0;
// Assert that we only skip a non-reported pointer.
Debug.Assert(itemsToSkip == 0 || _bitfield[0] == false);
// Ensure capacity for the values we are about to write
int capacity = startIndex + _bitfield.Count - itemsToSkip;
outputBitfield.Expand(capacity);
for (int i = itemsToSkip; i < _bitfield.Count; i++)
{
// We should never overwrite a TRUE value in the table.
Debug.Assert(!outputBitfield[startIndex + i - itemsToSkip] || _bitfield[i]);
outputBitfield[startIndex + i - itemsToSkip] = _bitfield[i];
}
}
}
#if GENERICS_FORCE_USG
private unsafe void GetNonUniversalGCDescPointers(TypeDesc type, TypeBuilderState state, TypeBuilderState tempNonUniversalState)
{
NativeParser nonUniversalTypeInfoParser = GetNativeLayoutInfoParser(type, ref tempNonUniversalState.NativeLayoutInfo);
NativeLayoutInfoLoadContext context = tempNonUniversalState.NativeLayoutInfo.LoadContext;
uint beginOffset = nonUniversalTypeInfoParser.Offset;
uint? staticGCDescId = nonUniversalTypeInfoParser.GetUnsignedForBagElementKind(BagElementKind.GcStaticDesc);
nonUniversalTypeInfoParser.Offset = beginOffset;
uint? threadStaticGCDescId = nonUniversalTypeInfoParser.GetUnsignedForBagElementKind(BagElementKind.ThreadStaticDesc);
if (staticGCDescId.HasValue)
state.NonUniversalStaticGCDesc = context.GetStaticInfo(staticGCDescId.Value);
if (threadStaticGCDescId.HasValue)
state.NonUniversalThreadStaticGCDesc = context.GetStaticInfo(threadStaticGCDescId.Value);
state.NonUniversalInstanceGCDescSize = RuntimeAugments.GetGCDescSize(tempNonUniversalState.TemplateType.RuntimeTypeHandle);
if (state.NonUniversalInstanceGCDescSize > 0)
state.NonUniversalInstanceGCDesc = new IntPtr(((byte*)tempNonUniversalState.TemplateType.RuntimeTypeHandle.ToIntPtr().ToPointer()) - 1);
}
#endif
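// Allocates the MethodTable for a type being built and, when the type has thread-static fields,
// reserves and registers its thread-static block. The contents are filled in later by FinishRuntimeType.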
private unsafe void AllocateRuntimeType(TypeDesc type)
{
TypeBuilderState state = type.GetTypeBuilderState();
Debug.Assert(type is DefType || type is ArrayType || type is PointerType || type is ByRefType);
if (state.ThreadDataSize != 0)
state.ThreadStaticOffset = TypeLoaderEnvironment.Instance.GetNextThreadStaticsOffsetValue();
RuntimeTypeHandle rtt = EETypeCreator.CreateEEType(type, state);
if (state.ThreadDataSize != 0)
TypeLoaderEnvironment.Instance.RegisterDynamicThreadStaticsInfo(state.HalfBakedRuntimeTypeHandle, state.ThreadStaticOffset, state.ThreadDataSize);
TypeLoaderLogger.WriteLine("Allocated new type " + type.ToString() + " with hashcode value = 0x" + type.GetHashCode().LowLevelToString() + " with MethodTable = " + rtt.ToIntPtr().LowLevelToString() + " of size " + rtt.ToEETypePtr()->BaseSize.LowLevelToString());
}
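// Allocates the backing memory for a generic method dictionary; the individual cells are populated
// later by FinishMethodDictionary.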
private void AllocateRuntimeMethodDictionary(InstantiatedMethod method)
{
Debug.Assert(method.RuntimeMethodDictionary == IntPtr.Zero && method.Dictionary != null);
IntPtr rmd = method.Dictionary.Allocate();
method.AssociateWithRuntimeMethodDictionary(rmd);
TypeLoaderLogger.WriteLine("Allocated new method dictionary for method " + method.ToString() + " @ " + rmd.LowLevelToString());
}
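// Walks the base type chain to find the type that introduced the given vtable slot and returns that
// type's instantiation, i.e. the generic context for the method occupying the slot.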
private RuntimeTypeHandle[] GetGenericContextOfBaseType(DefType type, int vtableMethodSlot)
{
DefType baseType = type.BaseType;
Debug.Assert(baseType == null || !GetRuntimeTypeHandle(baseType).IsNull());
Debug.Assert(vtableMethodSlot < GetRuntimeTypeHandle(type).GetNumVtableSlots());
int numBaseTypeVtableSlots = baseType == null ? 0 : GetRuntimeTypeHandle(baseType).GetNumVtableSlots();
if (vtableMethodSlot < numBaseTypeVtableSlots)
return GetGenericContextOfBaseType(baseType, vtableMethodSlot);
else
return GetRuntimeTypeHandles(type.Instantiation);
}
#if FEATURE_UNIVERSAL_GENERICS
private unsafe void FinishVTableCallingConverterThunks(TypeDesc type, TypeBuilderState state)
{
Debug.Assert(state.TemplateType.IsCanonicalSubtype(CanonicalFormKind.Universal));
if (state.VTableMethodSignatures == null || state.VTableMethodSignatures.Length == 0)
return;
int numVtableSlots = GetRuntimeTypeHandle(type).GetNumVtableSlots();
IntPtr* vtableCells = (IntPtr*)((byte*)GetRuntimeTypeHandle(type).ToIntPtr() + sizeof(MethodTable));
Debug.Assert((state.VTableMethodSignatures.Length - state.NumSealedVTableMethodSignatures) <= numVtableSlots);
TypeDesc baseType = type.BaseType;
int numBaseTypeVtableSlots = GetRuntimeTypeHandle(baseType).GetNumVtableSlots();
// Generic context
RuntimeTypeHandle[] typeArgs = Empty<RuntimeTypeHandle>.Array;
if (type is DefType)
typeArgs = GetRuntimeTypeHandles(((DefType)type).Instantiation);
else if (type is ArrayType)
typeArgs = GetRuntimeTypeHandles(new Instantiation(new TypeDesc[] { ((ArrayType)type).ElementType }));
for (int i = 0; i < state.VTableMethodSignatures.Length; i++)
{
RuntimeTypeHandle[] typeArgsToUse = typeArgs;
int vtableSlotInDynamicType = -1;
if (!state.VTableMethodSignatures[i].IsSealedVTableSlot)
{
vtableSlotInDynamicType = state.VTableSlotsMapping.GetVTableSlotInTargetType((int)state.VTableMethodSignatures[i].VTableSlot);
Debug.Assert(vtableSlotInDynamicType != -1);
if (vtableSlotInDynamicType < numBaseTypeVtableSlots)
{
// Vtable method from the vtable portion of a base type. Use generic context of the basetype defining the vtable slot.
// We should never reach here for array types (the vtable entries of the System.Array basetype should never need a converter).
Debug.Assert(type is DefType);
typeArgsToUse = GetGenericContextOfBaseType((DefType)type, vtableSlotInDynamicType);
}
}
IntPtr originalFunctionPointerFromVTable = state.VTableMethodSignatures[i].IsSealedVTableSlot ?
((IntPtr*)state.HalfBakedSealedVTable)[state.VTableMethodSignatures[i].VTableSlot] :
vtableCells[vtableSlotInDynamicType];
IntPtr thunkPtr = CallConverterThunk.MakeThunk(
ThunkKind.StandardToGeneric,
originalFunctionPointerFromVTable,
state.VTableMethodSignatures[i].MethodSignature,
IntPtr.Zero, // No instantiating arg for non-generic instance methods
typeArgsToUse,
Empty<RuntimeTypeHandle>.Array); // No GVMs in vtables, so no method args
if (state.VTableMethodSignatures[i].IsSealedVTableSlot)
{
// Patch the sealed vtable entry to point to the calling converter thunk
Debug.Assert(state.VTableMethodSignatures[i].VTableSlot < state.NumSealedVTableEntries && state.HalfBakedSealedVTable != IntPtr.Zero);
((IntPtr*)state.HalfBakedSealedVTable)[state.VTableMethodSignatures[i].VTableSlot] = thunkPtr;
}
else
{
// Patch the vtable entry to point to the calling converter thunk
Debug.Assert(vtableSlotInDynamicType < numVtableSlots && vtableCells != null);
vtableCells[vtableSlotInDynamicType] = thunkPtr;
}
}
}
#endif
//
// Returns either the registered type handle or the half-baked type handle. This method should only be called
// during the final phase of type building.
//
public RuntimeTypeHandle GetRuntimeTypeHandle(TypeDesc type)
{
#if DEBUG
Debug.Assert(_finalTypeBuilding);
#endif
var rtth = type.RuntimeTypeHandle;
if (!rtth.IsNull())
return rtth;
rtth = type.GetTypeBuilderState().HalfBakedRuntimeTypeHandle;
Debug.Assert(!rtth.IsNull());
return rtth;
}
public RuntimeTypeHandle[] GetRuntimeTypeHandles(Instantiation types)
{
if (types.Length == 0)
return Array.Empty<RuntimeTypeHandle>();
RuntimeTypeHandle[] result = new RuntimeTypeHandle[types.Length];
for (int i = 0; i < types.Length; i++)
result[i] = GetRuntimeTypeHandle(types[i]);
return result;
}
public static DefType GetBaseTypeUsingRuntimeTypeHandle(TypeDesc type)
{
type.RetrieveRuntimeTypeHandleIfPossible();
unsafe
{
RuntimeTypeHandle thBaseTypeTemplate = type.RuntimeTypeHandle.ToEETypePtr()->BaseType->ToRuntimeTypeHandle();
if (thBaseTypeTemplate.IsNull())
return null;
return (DefType)type.Context.ResolveRuntimeTypeHandle(thBaseTypeTemplate);
}
}
public static DefType GetBaseTypeThatIsCorrectForMDArrays(TypeDesc type)
{
if (type.BaseType == type.Context.GetWellKnownType(WellKnownType.Array))
{
// Use the type from the template; the metadata we have will be inaccurate for multidimensional
// arrays, as we hide the MDArray infrastructure from the metadata.
TypeDesc template = type.ComputeTemplate(false);
return GetBaseTypeUsingRuntimeTypeHandle(template ?? type);
}
return type.BaseType;
}
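// Copies the runtime type handles of all runtime interfaces into the interface map of the half-baked MethodTable.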
private void FinishInterfaces(TypeDesc type, TypeBuilderState state)
{
DefType[] interfaces = state.RuntimeInterfaces;
if (interfaces != null)
{
for (int i = 0; i < interfaces.Length; i++)
{
state.HalfBakedRuntimeTypeHandle.SetInterface(i, GetRuntimeTypeHandle(interfaces[i]));
}
}
}
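// Publishes the type's generic dictionary: stores the dictionary pointer into the vtable slot reserved
// for it (when the type shares normal canonical code) and then fills in the dictionary cells.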
private unsafe void FinishTypeDictionary(TypeDesc type, TypeBuilderState state)
{
if (state.Dictionary != null)
{
// First, update the dictionary slot in the type's vtable to point to the created dictionary when applicable
Debug.Assert(state.HalfBakedDictionary != IntPtr.Zero);
int dictionarySlot = EETypeCreator.GetDictionarySlotInVTable(type);
if (dictionarySlot >= 0)
{
state.HalfBakedRuntimeTypeHandle.SetDictionary(dictionarySlot, state.HalfBakedDictionary);
}
else
{
// Dictionary shouldn't be in the vtable of the type
Debug.Assert(!type.CanShareNormalGenericCode());
}
TypeLoaderLogger.WriteLine("Setting dictionary entries for type " + type.ToString() + " @ " + state.HalfBakedDictionary.LowLevelToString());
state.Dictionary.Finish(this);
}
}
private unsafe void FinishMethodDictionary(InstantiatedMethod method)
{
Debug.Assert(method.Dictionary != null);
TypeLoaderLogger.WriteLine("Setting dictionary entries for method " + method.ToString() + " @ " + method.RuntimeMethodDictionary.LowLevelToString());
method.Dictionary.Finish(this);
}
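// Stores the function pointer used to run the class constructor into the type's non-GC static region.
// Canonical cctor code is wrapped in a fat function pointer carrying the type as the instantiation
// argument; exact (unshared) code is stored directly.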
private unsafe void FinishClassConstructor(TypeDesc type, TypeBuilderState state)
{
if (!state.HasStaticConstructor)
return;
IntPtr canonicalClassConstructorFunctionPointer = IntPtr.Zero; // Pointer to canonical static method to serve as cctor
IntPtr exactClassConstructorFunctionPointer = IntPtr.Zero; // Exact pointer. Takes priority over canonical pointer
if (state.TemplateType == null)
{
if (!type.HasInstantiation)
{
// Non-Generic ReadyToRun types in their current state already have their static field region set up
// with the class constructor initialized.
return;
}
else
{
// For generic types, we need to do the metadata lookup and then resolve to a function pointer.
MethodDesc staticConstructor = type.GetStaticConstructor();
IntPtr staticCctor;
IntPtr unused1;
TypeLoaderEnvironment.MethodAddressType addressType;
if (!TypeLoaderEnvironment.TryGetMethodAddressFromMethodDesc(staticConstructor, out staticCctor, out unused1, out addressType))
{
Environment.FailFast("Unable to find class constructor method address for type:" + type.ToString());
}
Debug.Assert(unused1 == IntPtr.Zero);
switch (addressType)
{
case TypeLoaderEnvironment.MethodAddressType.Exact:
// If we have an exact match, put it in the slot directly
// and return as we don't want to make this into a fat function pointer
exactClassConstructorFunctionPointer = staticCctor;
break;
case TypeLoaderEnvironment.MethodAddressType.Canonical:
case TypeLoaderEnvironment.MethodAddressType.UniversalCanonical:
// If we have a canonical method, setup for generating a fat function pointer
canonicalClassConstructorFunctionPointer = staticCctor;
break;
default:
Environment.FailFast("Invalid MethodAddressType during ClassConstructor discovery");
return;
}
}
}
else if (state.ClassConstructorPointer.HasValue)
{
canonicalClassConstructorFunctionPointer = state.ClassConstructorPointer.Value;
}
else
{
// Look up the non-GC static data for the template type, and use the class constructor context offset to locate the class constructor's
// fat pointer within the non-GC static data.
IntPtr templateTypeStaticData = TypeLoaderEnvironment.Instance.TryGetNonGcStaticFieldData(GetRuntimeTypeHandle(state.TemplateType));
Debug.Assert(templateTypeStaticData != IntPtr.Zero);
IntPtr* templateTypeClassConstructorSlotPointer = (IntPtr*)((byte*)templateTypeStaticData + ClassConstructorOffset);
IntPtr templateTypeClassConstructorFatFunctionPointer = *templateTypeClassConstructorSlotPointer;
// Crack the fat function pointer into the raw class constructor method pointer and the generic type dictionary.
Debug.Assert(FunctionPointerOps.IsGenericMethodPointer(templateTypeClassConstructorFatFunctionPointer));
GenericMethodDescriptor* templateTypeGenericMethodDescriptor = FunctionPointerOps.ConvertToGenericDescriptor(templateTypeClassConstructorFatFunctionPointer);
Debug.Assert(templateTypeGenericMethodDescriptor != null);
canonicalClassConstructorFunctionPointer = templateTypeGenericMethodDescriptor->MethodFunctionPointer;
}
IntPtr generatedTypeStaticData = GetRuntimeTypeHandle(type).ToEETypePtr()->DynamicNonGcStaticsData;
IntPtr* generatedTypeClassConstructorSlotPointer = (IntPtr*)((byte*)generatedTypeStaticData + ClassConstructorOffset);
if (exactClassConstructorFunctionPointer != IntPtr.Zero)
{
// We have an exact pointer, not a canonical match
// Just set the pointer and return. No need for a fat pointer
*generatedTypeClassConstructorSlotPointer = exactClassConstructorFunctionPointer;
return;
}
// If we reach here, canonicalClassConstructorFunctionPointer points at a canonical method that needs to be converted into
// a fat function pointer so that the calli in the ClassConstructorRunner will work properly
Debug.Assert(canonicalClassConstructorFunctionPointer != IntPtr.Zero);
// Use the template type's class constructor method pointer and this type's generic type dictionary to generate a new fat pointer,
// and save that fat pointer back to this type's class constructor context offset within the non-GC static data.
IntPtr instantiationArgument = GetRuntimeTypeHandle(type).ToIntPtr();
IntPtr generatedTypeClassConstructorFatFunctionPointer = FunctionPointerOps.GetGenericMethodFunctionPointer(canonicalClassConstructorFunctionPointer, instantiationArgument);
*generatedTypeClassConstructorSlotPointer = generatedTypeClassConstructorFatFunctionPointer;
}
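// If the given base type has a dictionary embedded in its vtable, copies that dictionary pointer into
// the corresponding vtable slot of the derived type currently being built.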
private void CopyDictionaryFromTypeToAppropriateSlotInDerivedType(TypeDesc baseType, TypeBuilderState derivedTypeState)
{
var baseTypeState = baseType.GetOrCreateTypeBuilderState();
if (baseTypeState.HasDictionaryInVTable)
{
RuntimeTypeHandle baseTypeHandle = GetRuntimeTypeHandle(baseType);
// If the basetype is currently being created by the TypeBuilder, we need to get its dictionary pointer from the
// TypeBuilder state (at this point, the dictionary has not yet been set on the baseTypeHandle). If
// the basetype is not a dynamic type, or was dynamically allocated in the past, the TypeBuilder
// state will have a null dictionary pointer, in which case we need to read it directly from the basetype's vtable.
IntPtr dictionaryEntry = baseTypeState.HalfBakedDictionary;
if (dictionaryEntry == IntPtr.Zero)
dictionaryEntry = baseTypeHandle.GetDictionary();
Debug.Assert(dictionaryEntry != IntPtr.Zero);
// Compute the vtable slot for the dictionary entry to set
int dictionarySlot = EETypeCreator.GetDictionarySlotInVTable(baseType);
Debug.Assert(dictionarySlot >= 0);
derivedTypeState.HalfBakedRuntimeTypeHandle.SetDictionary(dictionarySlot, dictionaryEntry);
TypeLoaderLogger.WriteLine("Setting basetype " + baseType.ToString() + " dictionary on type " + derivedTypeState.TypeBeingBuilt.ToString());
}
}
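// Sets the base type of the half-baked MethodTable and copies the dictionary pointer of every base type
// in the hierarchy into the matching vtable slot of the type being built.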
private void FinishBaseTypeAndDictionaries(TypeDesc type, TypeBuilderState state)
{
DefType baseType = GetBaseTypeThatIsCorrectForMDArrays(type);
state.HalfBakedRuntimeTypeHandle.SetBaseType(baseType == null ? default(RuntimeTypeHandle) : GetRuntimeTypeHandle(baseType));
if (baseType == null)
return;
// Update every dictionary in type hierarchy with copy from base type
while (baseType != null)
{
CopyDictionaryFromTypeToAppropriateSlotInDerivedType(baseType, state);
baseType = baseType.BaseType;
}
}
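// Fills in the contents of a previously allocated MethodTable: generic composition, base type,
// interfaces, dictionaries, class constructor, and (for parameterized types) the related parameter type.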
private void FinishRuntimeType(TypeDesc type)
{
TypeLoaderLogger.WriteLine("Finishing type " + type.ToString() + " ...");
var state = type.GetTypeBuilderState();
if (type is DefType)
{
DefType typeAsDefType = (DefType)type;
if (type.HasInstantiation)
{
// Type definitions don't need any further finishing once created by the EETypeCreator
if (type.IsTypeDefinition)
return;
state.HalfBakedRuntimeTypeHandle.SetGenericDefinition(GetRuntimeTypeHandle(typeAsDefType.GetTypeDefinition()));
Instantiation instantiation = typeAsDefType.Instantiation;
state.HalfBakedRuntimeTypeHandle.SetGenericArity((uint)instantiation.Length);
for (int argIndex = 0; argIndex < instantiation.Length; argIndex++)
{
state.HalfBakedRuntimeTypeHandle.SetGenericArgument(argIndex, GetRuntimeTypeHandle(instantiation[argIndex]));
if (state.GenericVarianceFlags != null)
{
Debug.Assert(state.GenericVarianceFlags.Length == instantiation.Length);
state.HalfBakedRuntimeTypeHandle.SetGenericVariance(argIndex, state.GenericVarianceFlags[argIndex]);
}
}
}
FinishBaseTypeAndDictionaries(type, state);
FinishInterfaces(type, state);
FinishTypeDictionary(type, state);
FinishClassConstructor(type, state);
#if FEATURE_UNIVERSAL_GENERICS
// For types that were allocated from universal canonical templates, patch their vtables with
// pointers to calling convention conversion thunks
if (state.TemplateType != null && state.TemplateType.IsCanonicalSubtype(CanonicalFormKind.Universal))
FinishVTableCallingConverterThunks(type, state);
#endif
}
else if (type is ParameterizedType)
{
if (type is ArrayType)
{
ArrayType typeAsSzArrayType = (ArrayType)type;
state.HalfBakedRuntimeTypeHandle.SetRelatedParameterType(GetRuntimeTypeHandle(typeAsSzArrayType.ElementType));
state.HalfBakedRuntimeTypeHandle.SetComponentSize(state.ComponentSize.Value);
FinishInterfaces(type, state);
if (typeAsSzArrayType.IsSzArray && !typeAsSzArrayType.ElementType.IsPointer)
{
FinishTypeDictionary(type, state);
#if FEATURE_UNIVERSAL_GENERICS
// For types that were allocated from universal canonical templates, patch their vtables with
// pointers to calling convention conversion thunks
if (state.TemplateType != null && state.TemplateType.IsCanonicalSubtype(CanonicalFormKind.Universal))
FinishVTableCallingConverterThunks(type, state);
#endif
}
}
else if (type is PointerType)
{
state.HalfBakedRuntimeTypeHandle.SetRelatedParameterType(GetRuntimeTypeHandle(((PointerType)type).ParameterType));
// Nothing else to do for pointer types
}
else if (type is ByRefType)
{
state.HalfBakedRuntimeTypeHandle.SetRelatedParameterType(GetRuntimeTypeHandle(((ByRefType)type).ParameterType));
// We used a pointer type for the template because they're similar enough. Adjust this to be a ByRef.
unsafe
{
Debug.Assert(state.HalfBakedRuntimeTypeHandle.ToEETypePtr()->ParameterizedTypeShape == ParameterizedTypeShapeConstants.Pointer);
state.HalfBakedRuntimeTypeHandle.SetParameterizedTypeShape(ParameterizedTypeShapeConstants.ByRef);
Debug.Assert(state.HalfBakedRuntimeTypeHandle.ToEETypePtr()->ElementType == EETypeElementType.Pointer);
state.HalfBakedRuntimeTypeHandle.ToEETypePtr()->Flags = EETypeBuilderHelpers.ComputeFlags(type);
Debug.Assert(state.HalfBakedRuntimeTypeHandle.ToEETypePtr()->ElementType == EETypeElementType.ByRef);
}
}
}
else
{
Debug.Assert(false);
}
}
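// Enumerates the types that must be registered with the runtime's dynamic generics tables: generic
// instantiations plus metadata-defined type definitions.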
private IEnumerable<TypeEntryToRegister> TypesToRegister()
{
for (int i = 0; i < _typesThatNeedTypeHandles.Count; i++)
{
DefType typeAsDefType = _typesThatNeedTypeHandles[i] as DefType;
if (typeAsDefType == null)
continue;
if (typeAsDefType.HasInstantiation && !typeAsDefType.IsTypeDefinition)
{
yield return new TypeEntryToRegister
{
GenericTypeEntry = new GenericTypeEntry
{
_genericTypeDefinitionHandle = GetRuntimeTypeHandle(typeAsDefType.GetTypeDefinition()),
_genericTypeArgumentHandles = GetRuntimeTypeHandles(typeAsDefType.Instantiation),
_instantiatedTypeHandle = typeAsDefType.GetTypeBuilderState().HalfBakedRuntimeTypeHandle
}
};
}
else
{
yield return new TypeEntryToRegister
{
MetadataDefinitionType = (MetadataType)typeAsDefType
};
}
}
}
private IEnumerable<GenericMethodEntry> MethodsToRegister()
{
for (int i = 0; i < _methodsThatNeedDictionaries.Count; i++)
{
InstantiatedMethod method = _methodsThatNeedDictionaries[i];
yield return new GenericMethodEntry
{
_declaringTypeHandle = GetRuntimeTypeHandle(method.OwningType),
_genericMethodArgumentHandles = GetRuntimeTypeHandles(method.Instantiation),
_methodNameAndSignature = method.NameAndSignature,
_methodDictionary = method.RuntimeMethodDictionary
};
}
}
private void RegisterGenericTypesAndMethods()
{
int typesToRegisterCount = 0;
for (int i = 0; i < _typesThatNeedTypeHandles.Count; i++)
{
if (_typesThatNeedTypeHandles[i] is DefType)
typesToRegisterCount++;
}
DynamicGenericsRegistrationData registrationData = new DynamicGenericsRegistrationData
{
TypesToRegisterCount = typesToRegisterCount,
TypesToRegister = (typesToRegisterCount != 0) ? TypesToRegister() : null,
MethodsToRegisterCount = _methodsThatNeedDictionaries.Count,
MethodsToRegister = (_methodsThatNeedDictionaries.Count != 0) ? MethodsToRegister() : null,
};
TypeLoaderEnvironment.Instance.RegisterDynamicGenericTypesAndMethods(registrationData);
}
/// <summary>
/// Publish generic type / method information to the data buffer read by the debugger. This supports
/// debugging dynamically created types / methods
/// </summary>
private void RegisterDebugDataForTypesAndMethods()
{
for (int i = 0; i < _typesThatNeedTypeHandles.Count; i++)
{
DefType typeAsDefType;
if ((typeAsDefType = _typesThatNeedTypeHandles[i] as DefType) != null)
{
SerializedDebugData.RegisterDebugDataForType(this, typeAsDefType, typeAsDefType.GetTypeBuilderState());
}
}
for (int i = 0; i < _methodsThatNeedDictionaries.Count; i++)
{
SerializedDebugData.RegisterDebugDataForMethod(this, _methodsThatNeedDictionaries[i]);
}
}
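// Final phase of type building: allocates all MethodTables and method dictionaries, fills in their
// contents, reserves space in the parameterized-type caches, registers everything with the runtime,
// and finally publishes the new runtime type handles.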
private void FinishTypeAndMethodBuilding()
{
// Once we start allocating EETypes and dictionaries, the only accepted failure is OOM.
// TODO: Error handling - on retry, restart where we failed last time? The current implementation is leaking on OOM.
#if DEBUG
_finalTypeBuilding = true;
#endif
// At this point we know all types that need EETypes. Allocate all EETypes so that we can start building
// their contents.
for (int i = 0; i < _typesThatNeedTypeHandles.Count; i++)
{
AllocateRuntimeType(_typesThatNeedTypeHandles[i]);
}
for (int i = 0; i < _methodsThatNeedDictionaries.Count; i++)
{
AllocateRuntimeMethodDictionary(_methodsThatNeedDictionaries[i]);
}
// Do not add more type phases here. Instead, read the required information from the TypeDesc or TypeBuilderState.
// Fill in content of all EETypes
for (int i = 0; i < _typesThatNeedTypeHandles.Count; i++)
{
FinishRuntimeType(_typesThatNeedTypeHandles[i]);
}
for (int i = 0; i < _methodsThatNeedDictionaries.Count; i++)
{
FinishMethodDictionary(_methodsThatNeedDictionaries[i]);
}
RegisterDebugDataForTypesAndMethods();
int newArrayTypesCount = 0;
int newPointerTypesCount = 0;
int newByRefTypesCount = 0;
int[] mdArrayNewTypesCount = null;
for (int i = 0; i < _typesThatNeedTypeHandles.Count; i++)
{
ParameterizedType typeAsParameterizedType = _typesThatNeedTypeHandles[i] as ParameterizedType;
if (typeAsParameterizedType == null)
continue;
if (typeAsParameterizedType.IsSzArray)
newArrayTypesCount++;
else if (typeAsParameterizedType.IsPointer)
newPointerTypesCount++;
else if (typeAsParameterizedType.IsByRef)
newByRefTypesCount++;
else if (typeAsParameterizedType.IsMdArray)
{
if (mdArrayNewTypesCount == null)
mdArrayNewTypesCount = new int[MDArray.MaxRank + 1];
mdArrayNewTypesCount[((ArrayType)typeAsParameterizedType).Rank]++;
}
}
// Reserve space in the array/pointer caches so that the actual adding can be fault-free.
var szArrayCache = TypeSystemContext.GetArrayTypesCache(false, -1);
szArrayCache.Reserve(szArrayCache.Count + newArrayTypesCount);
//
if (mdArrayNewTypesCount != null)
{
for (int i = 0; i < mdArrayNewTypesCount.Length; i++)
{
if (mdArrayNewTypesCount[i] == 0)
continue;
var mdArrayCache = TypeSystemContext.GetArrayTypesCache(true, i);
mdArrayCache.Reserve(mdArrayCache.Count + mdArrayNewTypesCount[i]);
}
}
TypeSystemContext.PointerTypesCache.Reserve(TypeSystemContext.PointerTypesCache.Count + newPointerTypesCount);
TypeSystemContext.ByRefTypesCache.Reserve(TypeSystemContext.ByRefTypesCache.Count + newByRefTypesCount);
// Finally, register all generic types and methods atomically with the runtime
RegisterGenericTypesAndMethods();
for (int i = 0; i < _typesThatNeedTypeHandles.Count; i++)
{
_typesThatNeedTypeHandles[i].SetRuntimeTypeHandleUnsafe(_typesThatNeedTypeHandles[i].GetTypeBuilderState().HalfBakedRuntimeTypeHandle);
TypeLoaderLogger.WriteLine("Successfully Registered type " + _typesThatNeedTypeHandles[i].ToString() + ".");
}
// Save all constructed array and pointer types to the types cache
for (int i = 0; i < _typesThatNeedTypeHandles.Count; i++)
{
ParameterizedType typeAsParameterizedType = _typesThatNeedTypeHandles[i] as ParameterizedType;
if (typeAsParameterizedType == null)
continue;
Debug.Assert(!typeAsParameterizedType.RuntimeTypeHandle.IsNull());
Debug.Assert(!typeAsParameterizedType.ParameterType.RuntimeTypeHandle.IsNull());
if (typeAsParameterizedType.IsMdArray)
TypeSystemContext.GetArrayTypesCache(true, ((ArrayType)typeAsParameterizedType).Rank).AddOrGetExisting(typeAsParameterizedType.RuntimeTypeHandle);
else if (typeAsParameterizedType.IsSzArray)
TypeSystemContext.GetArrayTypesCache(false, -1).AddOrGetExisting(typeAsParameterizedType.RuntimeTypeHandle);
else if (typeAsParameterizedType.IsByRef)
{
unsafe
{
Debug.Assert(typeAsParameterizedType.RuntimeTypeHandle.ToEETypePtr()->IsByRefType);
}
TypeSystemContext.ByRefTypesCache.AddOrGetExisting(typeAsParameterizedType.RuntimeTypeHandle);
}
else
{
Debug.Assert(typeAsParameterizedType is PointerType);
unsafe
{
Debug.Assert(typeAsParameterizedType.RuntimeTypeHandle.ToEETypePtr()->IsPointerType);
}
TypeSystemContext.PointerTypesCache.AddOrGetExisting(typeAsParameterizedType.RuntimeTypeHandle);
}
}
}
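// Builds a single type together with all of its dependencies and publishes the results to the runtime.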
internal void BuildType(TypeDesc type)
{
TypeLoaderLogger.WriteLine("Dynamically allocating new type for " + type.ToString());
// Construct a new type along with all the dependencies that are needed to create interface lists,
// generic dictionaries, etc.
// Start by collecting all dependencies we need to create in order to create this type.
PrepareType(type);
// Process the pending types
ProcessTypesNeedingPreparation();
FinishTypeAndMethodBuilding();
}
internal bool TryComputeFieldOffset(DefType declaringType, uint fieldOrdinal, out int fieldOffset)
{
TypeLoaderLogger.WriteLine("Computing offset of field #" + fieldOrdinal.LowLevelToString() + " on type " + declaringType.ToString());
// Get the computed field offset result
LayoutInt layoutFieldOffset = declaringType.GetFieldByNativeLayoutOrdinal(fieldOrdinal).Offset;
if (layoutFieldOffset.IsIndeterminate)
{
fieldOffset = 0;
return false;
}
fieldOffset = layoutFieldOffset.AsInt;
return true;
}
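// Builds a generic method instantiation (its dictionary plus any dependent types).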
private void BuildMethod(InstantiatedMethod method)
{
TypeLoaderLogger.WriteLine("Dynamically allocating new method instantiation for " + method.ToString());
// Start by collecting all dependencies we need to create in order to create this method.
PrepareMethod(method);
// Process the pending types
ProcessTypesNeedingPreparation();
FinishTypeAndMethodBuilding();
}
private static DefType GetExactDeclaringType(DefType srcDefType, DefType dstDefType)
{
while (srcDefType != null)
{
if (srcDefType.HasSameTypeDefinition(dstDefType))
return srcDefType;
srcDefType = srcDefType.BaseType;
}
Debug.Assert(false);
return null;
}
//
// This method is used by the lazy generic lookup. It resolves the signature of the runtime artifact in the given instantiation context.
//
private unsafe IntPtr BuildGenericLookupTarget(TypeSystemContext typeSystemContext, IntPtr context, IntPtr signature, out IntPtr auxResult)
{
TypeLoaderLogger.WriteLine("BuildGenericLookupTarget for " + context.LowLevelToString() + "/" + signature.LowLevelToString());
TypeManagerHandle typeManager;
NativeReader reader;
uint offset;
// The first is a pointer that points to the TypeManager indirection cell.
// The second is the offset into the native layout info blob in that TypeManager, where the native signature is encoded.
IntPtr** lazySignature = (IntPtr**)signature.ToPointer();
typeManager = new TypeManagerHandle(lazySignature[0][0]);
offset = checked((uint)new IntPtr(lazySignature[1]).ToInt32());
reader = TypeLoaderEnvironment.Instance.GetNativeLayoutInfoReader(typeManager);
NativeParser parser = new NativeParser(reader, offset);
GenericContextKind contextKind = (GenericContextKind)parser.GetUnsigned();
NativeFormatModuleInfo moduleInfo = ModuleList.Instance.GetModuleInfoByHandle(typeManager);
NativeLayoutInfoLoadContext nlilContext = new NativeLayoutInfoLoadContext();
nlilContext._module = moduleInfo;
nlilContext._typeSystemContext = typeSystemContext;
#if SUPPORTS_NATIVE_METADATA_TYPE_LOADING
NativeFormatMetadataUnit metadataUnit = null;
if (moduleInfo.ModuleType == ModuleType.ReadyToRun)
metadataUnit = typeSystemContext.ResolveMetadataUnit(moduleInfo);
#endif
if ((contextKind & GenericContextKind.FromMethodHiddenArg) != 0)
{
RuntimeTypeHandle declaringTypeHandle;
MethodNameAndSignature nameAndSignature;
RuntimeTypeHandle[] genericMethodArgHandles;
bool success = TypeLoaderEnvironment.Instance.TryGetGenericMethodComponents(context, out declaringTypeHandle, out nameAndSignature, out genericMethodArgHandles);
Debug.Assert(success);
if (RuntimeAugments.IsGenericType(declaringTypeHandle))
{
DefType declaringType = (DefType)typeSystemContext.ResolveRuntimeTypeHandle(declaringTypeHandle);
nlilContext._typeArgumentHandles = declaringType.Instantiation;
}
nlilContext._methodArgumentHandles = typeSystemContext.ResolveRuntimeTypeHandles(genericMethodArgHandles);
}
else
{
TypeDesc typeContext = typeSystemContext.ResolveRuntimeTypeHandle(RuntimeAugments.CreateRuntimeTypeHandle(context));
if (typeContext is DefType)
{
nlilContext._typeArgumentHandles = ((DefType)typeContext).Instantiation;
}
else if (typeContext is ArrayType)
{
nlilContext._typeArgumentHandles = new Instantiation(new TypeDesc[] { ((ArrayType)typeContext).ElementType });
}
else
{
Debug.Assert(false);
}
if ((contextKind & GenericContextKind.HasDeclaringType) != 0)
{
// No need to deal with arrays - arrays can't have a declaring type
TypeDesc declaringType;
if (moduleInfo.ModuleType == ModuleType.Eager)
{
declaringType = nlilContext.GetType(ref parser);
}
else
{
Debug.Assert(moduleInfo.ModuleType == ModuleType.ReadyToRun);
#if SUPPORTS_NATIVE_METADATA_TYPE_LOADING
uint typeToken = parser.GetUnsigned();
declaringType = metadataUnit.GetType(((int)typeToken).AsHandle());
#else
Environment.FailFast("Ready to Run module type?");
declaringType = null;
#endif
}
DefType actualContext = GetExactDeclaringType((DefType)typeContext, (DefType)declaringType);
nlilContext._typeArgumentHandles = actualContext.Instantiation;
}
}
if ((contextKind & GenericContextKind.NeedsUSGContext) != 0)
{
IntPtr genericDictionary;
auxResult = IntPtr.Zero;
// There is a cache in place so that this function doesn't get called much, but we still need a registration store,
// so we don't leak allocated contexts
if (TypeLoaderEnvironment.Instance.TryLookupConstructedLazyDictionaryForContext(context, signature, out genericDictionary))
{
return genericDictionary;
}
GenericTypeDictionary ucgDict;
if (moduleInfo.ModuleType == ModuleType.Eager)
{
ucgDict = new GenericTypeDictionary(GenericDictionaryCell.BuildDictionary(this, nlilContext, parser));
}
else
{
#if SUPPORTS_NATIVE_METADATA_TYPE_LOADING
Debug.Assert(moduleInfo.ModuleType == ModuleType.ReadyToRun);
FixupCellMetadataResolver metadataResolver = new FixupCellMetadataResolver(metadataUnit, nlilContext);
ucgDict = new GenericTypeDictionary(GenericDictionaryCell.BuildDictionaryFromMetadataTokensAndContext(this, parser, metadataUnit, metadataResolver));
#else
Environment.FailFast("Ready to Run module type?");
ucgDict = null;
#endif
}
genericDictionary = ucgDict.Allocate();
// Process the pending types
ProcessTypesNeedingPreparation();
FinishTypeAndMethodBuilding();
ucgDict.Finish(this);
TypeLoaderEnvironment.Instance.RegisterConstructedLazyDictionaryForContext(context, signature, genericDictionary);
return genericDictionary;
}
else
{
GenericDictionaryCell cell;
if (moduleInfo.ModuleType == ModuleType.Eager)
{
cell = GenericDictionaryCell.ParseAndCreateCell(
nlilContext,
ref parser);
}
else
{
Debug.Assert(moduleInfo.ModuleType == ModuleType.ReadyToRun);
#if SUPPORTS_NATIVE_METADATA_TYPE_LOADING
MetadataFixupKind fixupKind = (MetadataFixupKind)parser.GetUInt8();
Internal.Metadata.NativeFormat.Handle token = parser.GetUnsigned().AsHandle();
Internal.Metadata.NativeFormat.Handle token2 = default(Internal.Metadata.NativeFormat.Handle);
switch (fixupKind)
{
case MetadataFixupKind.GenericConstrainedMethod:
case MetadataFixupKind.NonGenericConstrainedMethod:
case MetadataFixupKind.NonGenericDirectConstrainedMethod:
token2 = parser.GetUnsigned().AsHandle();
break;
}
FixupCellMetadataResolver resolver = new FixupCellMetadataResolver(metadataUnit, nlilContext);
cell = GenericDictionaryCell.CreateCellFromFixupKindAndToken(fixupKind, resolver, token, token2);
#else
Environment.FailFast("Ready to Run module type?");
cell = null;
#endif
}
cell.Prepare(this);
// Process the pending types
ProcessTypesNeedingPreparation();
FinishTypeAndMethodBuilding();
IntPtr dictionaryCell = cell.CreateLazyLookupCell(this, out auxResult);
return dictionaryCell;
}
}
//
// This method is used to build the floating portion of a generic dictionary.
//
private unsafe IntPtr BuildFloatingDictionary(TypeSystemContext typeSystemContext, IntPtr context, bool isTypeContext, IntPtr fixedDictionary, out bool isNewlyAllocatedDictionary)
{
isNewlyAllocatedDictionary = true;
NativeParser nativeLayoutParser;
NativeLayoutInfoLoadContext nlilContext;
if (isTypeContext)
{
TypeDesc typeContext = typeSystemContext.ResolveRuntimeTypeHandle(*(RuntimeTypeHandle*)&context);
TypeLoaderLogger.WriteLine("Building floating dictionary layout for type " + typeContext.ToString() + "...");
// We should only perform updates to floating dictionaries for types that share normal canonical code
Debug.Assert(typeContext.CanShareNormalGenericCode());
// Computing the template will throw if no template is found.
typeContext.ComputeTemplate();
TypeBuilderState state = typeContext.GetOrCreateTypeBuilderState();
nativeLayoutParser = state.GetParserForNativeLayoutInfo();
nlilContext = state.NativeLayoutInfo.LoadContext;
}
else
{
RuntimeTypeHandle declaringTypeHandle;
MethodNameAndSignature nameAndSignature;
RuntimeTypeHandle[] genericMethodArgHandles;
bool success = TypeLoaderEnvironment.Instance.TryGetGenericMethodComponents(context, out declaringTypeHandle, out nameAndSignature, out genericMethodArgHandles);
Debug.Assert(success);
DefType declaringType = (DefType)typeSystemContext.ResolveRuntimeTypeHandle(declaringTypeHandle);
InstantiatedMethod methodContext = (InstantiatedMethod)typeSystemContext.ResolveGenericMethodInstantiation(
false,
declaringType,
nameAndSignature,
typeSystemContext.ResolveRuntimeTypeHandles(genericMethodArgHandles),
IntPtr.Zero,
false);
TypeLoaderLogger.WriteLine("Building floating dictionary layout for method " + methodContext.ToString() + "...");
// We should only perform updates to floating dictionaries for generic methods that share normal canonical code
Debug.Assert(!methodContext.IsNonSharableMethod);
uint nativeLayoutInfoToken;
NativeFormatModuleInfo nativeLayoutModule;
MethodDesc templateMethod = (new TemplateLocator()).TryGetGenericMethodTemplate(methodContext, out nativeLayoutModule, out nativeLayoutInfoToken);
if (templateMethod == null)
throw new TypeBuilder.MissingTemplateException();
NativeReader nativeLayoutInfoReader = TypeLoaderEnvironment.Instance.GetNativeLayoutInfoReader(nativeLayoutModule.Handle);
nativeLayoutParser = new NativeParser(nativeLayoutInfoReader, nativeLayoutInfoToken);
nlilContext = new NativeLayoutInfoLoadContext
{
_typeSystemContext = methodContext.Context,
_typeArgumentHandles = methodContext.OwningType.Instantiation,
_methodArgumentHandles = methodContext.Instantiation,
_module = nativeLayoutModule
};
}
NativeParser dictionaryLayoutParser = nativeLayoutParser.GetParserForBagElementKind(BagElementKind.DictionaryLayout);
if (dictionaryLayoutParser.IsNull)
return IntPtr.Zero;
int floatingVersionCellIndex, floatingVersionInLayout;
GenericDictionaryCell[] floatingCells = GenericDictionaryCell.BuildFloatingDictionary(this, nlilContext, dictionaryLayoutParser, out floatingVersionCellIndex, out floatingVersionInLayout);
if (floatingCells == null)
return IntPtr.Zero;
// If the floating section is already constructed, then return. This means we were beaten by another thread.
if (*((IntPtr*)fixedDictionary) != IntPtr.Zero)
{
isNewlyAllocatedDictionary = false;
return *((IntPtr*)fixedDictionary);
}
GenericTypeDictionary floatingDict = new GenericTypeDictionary(floatingCells);
IntPtr result = floatingDict.Allocate();
ProcessTypesNeedingPreparation();
FinishTypeAndMethodBuilding();
floatingDict.Finish(this);
return result;
}
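// Attempts to build a runtime type handle for a generic instantiation. A hypothetical caller might do:
//   TypeBuilder.TryBuildGenericType(listDefinitionHandle, new[] { intHandle }, out RuntimeTypeHandle listOfInt);
// (handle names above are illustrative only). Returns false when no template is available for the
// requested instantiation.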
public static bool TryBuildGenericType(RuntimeTypeHandle genericTypeDefinitionHandle, RuntimeTypeHandle[] genericTypeArgumentHandles, out RuntimeTypeHandle runtimeTypeHandle)
{
Debug.Assert(!genericTypeDefinitionHandle.IsNull() && genericTypeArgumentHandles != null && genericTypeArgumentHandles.Length > 0);
try
{
TypeSystemContext context = TypeSystemContextFactory.Create();
DefType genericDef = (DefType)context.ResolveRuntimeTypeHandle(genericTypeDefinitionHandle);
Instantiation genericArgs = context.ResolveRuntimeTypeHandles(genericTypeArgumentHandles);
DefType typeBeingLoaded = context.ResolveGenericInstantiation(genericDef, genericArgs);
new TypeBuilder().BuildType(typeBeingLoaded);
runtimeTypeHandle = typeBeingLoaded.RuntimeTypeHandle;
Debug.Assert(!runtimeTypeHandle.IsNull());
// Recycle the context only if we successfully built the type. The state may be partially initialized otherwise.
TypeSystemContextFactory.Recycle(context);
return true;
}
catch (MissingTemplateException)
{
runtimeTypeHandle = default(RuntimeTypeHandle);
return false;
}
}
public static bool TryBuildArrayType(RuntimeTypeHandle elementTypeHandle, bool isMdArray, int rank, out RuntimeTypeHandle arrayTypeHandle)
{
try
{
TypeSystemContext context = TypeSystemContextFactory.Create();
TypeDesc elementType = context.ResolveRuntimeTypeHandle(elementTypeHandle);
ArrayType arrayType = (ArrayType)context.GetArrayType(elementType, !isMdArray ? -1 : rank);
new TypeBuilder().BuildType(arrayType);
arrayTypeHandle = arrayType.RuntimeTypeHandle;
Debug.Assert(!arrayTypeHandle.IsNull());
// Recycle the context only if we successfully built the type. The state may be partially initialized otherwise.
TypeSystemContextFactory.Recycle(context);
return true;
}
catch (MissingTemplateException)
{
arrayTypeHandle = default(RuntimeTypeHandle);
return false;
}
}
public static bool TryBuildPointerType(RuntimeTypeHandle pointeeTypeHandle, out RuntimeTypeHandle pointerTypeHandle)
{
if (!TypeSystemContext.PointerTypesCache.TryGetValue(pointeeTypeHandle, out pointerTypeHandle))
{
TypeSystemContext context = TypeSystemContextFactory.Create();
TypeDesc pointerType = context.GetPointerType(context.ResolveRuntimeTypeHandle(pointeeTypeHandle));
pointerTypeHandle = EETypeCreator.CreatePointerEEType((uint)pointerType.GetHashCode(), pointeeTypeHandle, pointerType);
unsafe
{
Debug.Assert(pointerTypeHandle.ToEETypePtr()->IsPointerType);
}
TypeSystemContext.PointerTypesCache.AddOrGetExisting(pointerTypeHandle);
// Recycle the context only if we successfully built the type. The state may be partially initialized otherwise.
TypeSystemContextFactory.Recycle(context);
}
return true;
}
public static bool TryBuildByRefType(RuntimeTypeHandle pointeeTypeHandle, out RuntimeTypeHandle byRefTypeHandle)
{
if (!TypeSystemContext.ByRefTypesCache.TryGetValue(pointeeTypeHandle, out byRefTypeHandle))
{
TypeSystemContext context = TypeSystemContextFactory.Create();
TypeDesc byRefType = context.GetByRefType(context.ResolveRuntimeTypeHandle(pointeeTypeHandle));
byRefTypeHandle = EETypeCreator.CreateByRefEEType((uint)byRefType.GetHashCode(), pointeeTypeHandle, byRefType);
unsafe
{
Debug.Assert(byRefTypeHandle.ToEETypePtr()->IsByRefType);
}
TypeSystemContext.ByRefTypesCache.AddOrGetExisting(byRefTypeHandle);
// Recycle the context only if we successfully built the type. The state may be partially initialized otherwise.
TypeSystemContextFactory.Recycle(context);
}
return true;
}
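// Resolves the method described by the declaring type, name/signature and instantiation arguments,
// then builds its method dictionary. Returns false when no template is available.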
public static bool TryBuildGenericMethod(RuntimeTypeHandle declaringTypeHandle, RuntimeTypeHandle[] genericMethodArgHandles, MethodNameAndSignature methodNameAndSignature, out IntPtr methodDictionary)
{
TypeSystemContext context = TypeSystemContextFactory.Create();
DefType declaringType = (DefType)context.ResolveRuntimeTypeHandle(declaringTypeHandle);
InstantiatedMethod methodBeingLoaded = (InstantiatedMethod)context.ResolveGenericMethodInstantiation(false, declaringType, methodNameAndSignature, context.ResolveRuntimeTypeHandles(genericMethodArgHandles), IntPtr.Zero, false);
bool success = TryBuildGenericMethod(methodBeingLoaded, out methodDictionary);
// Recycle the context only if we successfully built the method. The state may be partially initialized otherwise.
if (success)
TypeSystemContextFactory.Recycle(context);
return success;
}
internal static bool TryBuildGenericMethod(InstantiatedMethod methodBeingLoaded, out IntPtr methodDictionary)
{
try
{
new TypeBuilder().BuildMethod(methodBeingLoaded);
methodDictionary = methodBeingLoaded.RuntimeMethodDictionary;
Debug.Assert(methodDictionary != IntPtr.Zero);
return true;
}
catch (MissingTemplateException)
{
methodDictionary = IntPtr.Zero;
return false;
}
}
private void ResolveSingleCell_Worker(GenericDictionaryCell cell, out IntPtr fixupResolution)
{
cell.Prepare(this);
// Process the pending types
ProcessTypesNeedingPreparation();
FinishTypeAndMethodBuilding();
// At this stage the pointer we need is accessible via a call to Create on the prepared cell
fixupResolution = cell.Create(this);
}
private void ResolveMultipleCells_Worker(GenericDictionaryCell[] cells, out IntPtr[] fixups)
{
foreach (var cell in cells)
{
cell.Prepare(this);
}
// Process the pending types
ProcessTypesNeedingPreparation();
FinishTypeAndMethodBuilding();
// At this stage the pointer we need is accessible via a call to Create on the prepared cell
fixups = new IntPtr[cells.Length];
for (int i = 0; i < fixups.Length; i++)
fixups[i] = cells[i].Create(this);
}
#if SUPPORTS_NATIVE_METADATA_TYPE_LOADING
private void ResolveSingleMetadataFixup(NativeFormatMetadataUnit module, Handle token, MetadataFixupKind fixupKind, out IntPtr fixupResolution)
{
FixupCellMetadataResolver metadata = new FixupCellMetadataResolver(module);
// Allocate a cell object to represent the fixup, and prepare it
GenericDictionaryCell cell = GenericDictionaryCell.CreateCellFromFixupKindAndToken(fixupKind, metadata, token, default(Handle));
ResolveSingleCell_Worker(cell, out fixupResolution);
}
public static bool TryResolveSingleMetadataFixup(NativeFormatModuleInfo module, int metadataToken, MetadataFixupKind fixupKind, out IntPtr fixupResolution)
{
TypeSystemContext context = TypeSystemContextFactory.Create();
NativeFormatMetadataUnit metadataUnit = context.ResolveMetadataUnit(module);
new TypeBuilder().ResolveSingleMetadataFixup(metadataUnit, metadataToken.AsHandle(), fixupKind, out fixupResolution);
TypeSystemContextFactory.Recycle(context);
return true;
}
public static void ResolveSingleTypeDefinition(QTypeDefinition qTypeDefinition, out IntPtr typeHandle)
{
TypeSystemContext context = TypeSystemContextFactory.Create();
TypeDesc type = context.GetTypeDescFromQHandle(qTypeDefinition);
GenericDictionaryCell cell = GenericDictionaryCell.CreateTypeHandleCell(type);
new TypeBuilder().ResolveSingleCell_Worker(cell, out typeHandle);
TypeSystemContextFactory.Recycle(context);
}
#endif
internal static void ResolveSingleCell(GenericDictionaryCell cell, out IntPtr fixupResolution)
{
new TypeBuilder().ResolveSingleCell_Worker(cell, out fixupResolution);
}
public static void ResolveMultipleCells(GenericDictionaryCell [] cells, out IntPtr[] fixups)
{
new TypeBuilder().ResolveMultipleCells_Worker(cells, out fixups);
}
public static IntPtr BuildGenericLookupTarget(IntPtr typeContext, IntPtr signature, out IntPtr auxResult)
{
try
{
TypeSystemContext context = TypeSystemContextFactory.Create();
IntPtr ret = new TypeBuilder().BuildGenericLookupTarget(context, typeContext, signature, out auxResult);
TypeSystemContextFactory.Recycle(context);
return ret;
}
catch (MissingTemplateException e)
{
// This should not ever happen. The static compiler should ensure that the templates are always
// available for types and methods referenced by lazy dictionary lookups
Environment.FailFast("MissingTemplateException thrown during lazy generic lookup", e);
auxResult = IntPtr.Zero;
return IntPtr.Zero;
}
}
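        // Computes the offset of the field with the given ordinal on an instantiated generic type.
        // Returns false if the type's native layout template is missing.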
public static bool TryGetFieldOffset(RuntimeTypeHandle declaringTypeHandle, uint fieldOrdinal, out int fieldOffset)
{
try
{
TypeSystemContext context = TypeSystemContextFactory.Create();
DefType declaringType = (DefType)context.ResolveRuntimeTypeHandle(declaringTypeHandle);
Debug.Assert(declaringType.HasInstantiation);
bool success = new TypeBuilder().TryComputeFieldOffset(declaringType, fieldOrdinal, out fieldOffset);
TypeSystemContextFactory.Recycle(context);
return success;
}
catch (MissingTemplateException)
{
fieldOffset = int.MinValue;
return false;
}
}
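        // Reads the delegate Invoke method signature from the universal native layout info of an
        // instantiated delegate type, when that information is available.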
internal static bool TryGetDelegateInvokeMethodSignature(RuntimeTypeHandle delegateTypeHandle, out RuntimeSignature signature)
{
signature = default(RuntimeSignature);
bool success = false;
TypeSystemContext context = TypeSystemContextFactory.Create();
DefType delegateType = (DefType)context.ResolveRuntimeTypeHandle(delegateTypeHandle);
Debug.Assert(delegateType.HasInstantiation);
NativeLayoutInfo universalLayoutInfo;
NativeParser parser = delegateType.GetOrCreateTypeBuilderState().GetParserForUniversalNativeLayoutInfo(out _, out universalLayoutInfo);
if (!parser.IsNull)
{
NativeParser sigParser = parser.GetParserForBagElementKind(BagElementKind.DelegateInvokeSignature);
if (!sigParser.IsNull)
{
signature = RuntimeSignature.CreateFromNativeLayoutSignature(universalLayoutInfo.Module.Handle, sigParser.Offset);
success = true;
}
}
TypeSystemContextFactory.Recycle(context);
return success;
}
//
// This method is used to build the floating portion of a generic dictionary.
//
internal static IntPtr TryBuildFloatingDictionary(IntPtr context, bool isTypeContext, IntPtr fixedDictionary, out bool isNewlyAllocatedDictionary)
{
isNewlyAllocatedDictionary = true;
try
{
TypeSystemContext typeSystemContext = TypeSystemContextFactory.Create();
IntPtr ret = new TypeBuilder().BuildFloatingDictionary(typeSystemContext, context, isTypeContext, fixedDictionary, out isNewlyAllocatedDictionary);
TypeSystemContextFactory.Recycle(typeSystemContext);
return ret;
}
catch (MissingTemplateException e)
{
// This should not ever happen. The static compiler should ensure that the templates are always
// available for types and methods that have floating dictionaries
Environment.FailFast("MissingTemplateException thrown during dictionary update", e);
return IntPtr.Zero;
}
}
}
}
| -1 |
dotnet/runtime | 66,248 | Fix issues related to JsonSerializerOptions mutation and caching. | Second attempt at merging https://github.com/dotnet/runtime/pull/65863. Original PR introduced test failures in netfx and was reverted in https://github.com/dotnet/runtime/pull/66235.
cc @jkotas | eiriktsarpalis | 2022-03-05T19:59:48Z | 2022-03-07T19:44:14Z | 44ec3c9474b3a93eb4f71791a43d8bb5c0b08a58 | 8b4eaf94140f488743d0c78caa3afec3b9c5d789 | Fix issues related to JsonSerializerOptions mutation and caching.. Second attempt at merging https://github.com/dotnet/runtime/pull/65863. Original PR introduced test failures in netfx and was reverted in https://github.com/dotnet/runtime/pull/66235.
cc @jkotas | ./src/coreclr/vm/ecalllist.h | // Licensed to the .NET Foundation under one or more agreements.
// The .NET Foundation licenses this file to you under the MIT license.
// ECallList.H
//
// This file contains definitions of FCall entrypoints
//
#ifndef FCFuncElement
#define FCFuncElement(name, impl)
#endif
#ifndef FCFuncElementSig
#define FCFuncElementSig(name,sig,impl)
#endif
#ifndef FCDynamic
#define FCDynamic(name,dynamicID)
#endif
#ifndef FCDynamicSig
#define FCDynamicSig(name,sig,dynamicID)
#endif
#ifndef FCUnreferenced
#define FCUnreferenced
#endif
#ifndef FCFuncStart
#define FCFuncStart(name)
#endif
#ifndef FCFuncEnd
#define FCFuncEnd()
#endif
#ifndef FCClassElement
#define FCClassElement(name,namespace,funcs)
#endif
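// Any of the macros above that the includer does not define expand to nothing, so this file can be
// included with only the subset of macros that a particular consumer needs.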
//
//
// Entrypoint definitions
//
//
FCFuncStart(gDependentHandleFuncs)
FCFuncElement("InternalInitialize", DependentHandle::InternalInitialize)
FCFuncElement("InternalGetTarget", DependentHandle::InternalGetTarget)
FCFuncElement("InternalGetDependent", DependentHandle::InternalGetDependent)
FCFuncElement("InternalGetTargetAndDependent", DependentHandle::InternalGetTargetAndDependent)
FCFuncElement("InternalSetTargetToNull", DependentHandle::InternalSetTargetToNull)
FCFuncElement("InternalSetDependent", DependentHandle::InternalSetDependent)
FCFuncElement("InternalFree", DependentHandle::InternalFree)
FCFuncEnd()
FCFuncStart(gEnumFuncs)
FCFuncElement("InternalGetUnderlyingType", ReflectionEnum::InternalGetEnumUnderlyingType)
FCFuncElement("InternalGetCorElementType", ReflectionEnum::InternalGetCorElementType)
FCFuncElement("InternalBoxEnum", ReflectionEnum::InternalBoxEnum)
FCFuncEnd()
FCFuncStart(gObjectFuncs)
FCFuncElement("GetType", ObjectNative::GetClass)
FCFuncEnd()
FCFuncStart(gStringFuncs)
FCDynamic("FastAllocateString", ECall::FastAllocateString)
FCDynamicSig(COR_CTOR_METHOD_NAME, &gsig_IM_ArrChar_RetVoid, ECall::CtorCharArrayManaged)
FCDynamicSig(COR_CTOR_METHOD_NAME, &gsig_IM_ArrChar_Int_Int_RetVoid, ECall::CtorCharArrayStartLengthManaged)
FCDynamicSig(COR_CTOR_METHOD_NAME, &gsig_IM_PtrChar_RetVoid, ECall::CtorCharPtrManaged)
FCDynamicSig(COR_CTOR_METHOD_NAME, &gsig_IM_PtrChar_Int_Int_RetVoid, ECall::CtorCharPtrStartLengthManaged)
FCDynamicSig(COR_CTOR_METHOD_NAME, &gsig_IM_Char_Int_RetVoid, ECall::CtorCharCountManaged)
FCDynamicSig(COR_CTOR_METHOD_NAME, &gsig_IM_ReadOnlySpanOfChar_RetVoid, ECall::CtorReadOnlySpanOfCharManaged)
FCDynamicSig(COR_CTOR_METHOD_NAME, &gsig_IM_PtrSByt_RetVoid, ECall::CtorSBytePtrManaged)
FCDynamicSig(COR_CTOR_METHOD_NAME, &gsig_IM_PtrSByt_Int_Int_RetVoid, ECall::CtorSBytePtrStartLengthManaged)
FCDynamicSig(COR_CTOR_METHOD_NAME, &gsig_IM_PtrSByt_Int_Int_Encoding_RetVoid, ECall::CtorSBytePtrStartLengthEncodingManaged)
FCFuncElement("SetTrailByte", COMString::FCSetTrailByte)
FCFuncElement("TryGetTrailByte", COMString::FCTryGetTrailByte)
FCFuncElement("IsInterned", AppDomainNative::IsStringInterned)
FCFuncElement("Intern", AppDomainNative::GetOrInternString)
FCFuncEnd()
FCFuncStart(gValueTypeFuncs)
FCFuncElement("CanCompareBits", ValueTypeHelper::CanCompareBits)
FCFuncElement("FastEqualsCheck", ValueTypeHelper::FastEqualsCheck)
FCFuncElement("GetHashCode", ValueTypeHelper::GetHashCode)
FCFuncElement("GetHashCodeOfPtr", ValueTypeHelper::GetHashCodeOfPtr)
FCFuncEnd()
FCFuncStart(gDiagnosticsDebugger)
FCFuncElement("BreakInternal", DebugDebugger::Break)
FCFuncElement("get_IsAttached", DebugDebugger::IsDebuggerAttached)
FCFuncElement("IsLogging", DebugDebugger::IsLogging)
FCFuncElement("CustomNotification", DebugDebugger::CustomNotification)
FCFuncEnd()
FCFuncStart(gDiagnosticsStackTrace)
FCFuncElement("GetStackFramesInternal", DebugStackTrace::GetStackFramesInternal)
FCFuncEnd()
FCFuncStart(gEnvironmentFuncs)
FCFuncElement("get_CurrentManagedThreadId", JIT_GetCurrentManagedThreadId)
FCFuncElement("get_TickCount", SystemNative::GetTickCount)
FCFuncElement("get_TickCount64", SystemNative::GetTickCount64)
FCFuncElement("set_ExitCode", SystemNative::SetExitCode)
FCFuncElement("get_ExitCode", SystemNative::GetExitCode)
FCFuncElement("GetCommandLineArgsNative", SystemNative::GetCommandLineArgs)
FCFuncElementSig("FailFast", &gsig_SM_Str_RetVoid, SystemNative::FailFast)
FCFuncElementSig("FailFast", &gsig_SM_Str_Exception_RetVoid, SystemNative::FailFastWithException)
FCFuncElementSig("FailFast", &gsig_SM_Str_Exception_Str_RetVoid, SystemNative::FailFastWithExceptionAndSource)
FCFuncEnd()
FCFuncStart(gExceptionFuncs)
FCFuncElement("IsImmutableAgileException", ExceptionNative::IsImmutableAgileException)
FCFuncElement("GetMethodFromStackTrace", SystemNative::GetMethodFromStackTrace)
FCFuncElement("PrepareForForeignExceptionRaise", ExceptionNative::PrepareForForeignExceptionRaise)
FCFuncElement("GetStackTracesDeepCopy", ExceptionNative::GetStackTracesDeepCopy)
FCFuncElement("SaveStackTracesFromDeepCopy", ExceptionNative::SaveStackTracesFromDeepCopy)
FCFuncElement("GetExceptionCount", ExceptionNative::GetExceptionCount)
FCFuncEnd()
FCFuncStart(gTypedReferenceFuncs)
FCFuncElement("InternalToObject", ReflectionInvocation::TypedReferenceToObject)
FCFuncElement("InternalMakeTypedReference", ReflectionInvocation::MakeTypedReference)
FCFuncEnd()
FCFuncStart(gSystem_Type)
FCFuncElement("GetTypeFromHandle", RuntimeTypeHandle::GetTypeFromHandle)
FCFuncElement("GetTypeFromHandleUnsafe", RuntimeTypeHandle::GetRuntimeType)
FCFuncEnd()
FCFuncStart(gSystem_RuntimeType)
FCFuncElement("GetGUID", ReflectionInvocation::GetGUID)
FCFuncElement("_CreateEnum", ReflectionInvocation::CreateEnum)
FCFuncElement("CanValueSpecialCast", ReflectionInvocation::CanValueSpecialCast)
FCFuncElement("AllocateValueType", ReflectionInvocation::AllocateValueType)
#if defined(FEATURE_COMINTEROP)
FCFuncElement("InvokeDispMethod", ReflectionInvocation::InvokeDispMethod)
#endif // defined(FEATURE_COMINTEROP)
FCFuncEnd()
FCFuncStart(gCOMTypeHandleFuncs)
FCFuncElement("IsInstanceOfType", RuntimeTypeHandle::IsInstanceOfType)
FCFuncElement("GetDeclaringMethod", RuntimeTypeHandle::GetDeclaringMethod)
FCFuncElement("GetDeclaringType", RuntimeTypeHandle::GetDeclaringType)
FCFuncElement("GetFirstIntroducedMethod", RuntimeTypeHandle::GetFirstIntroducedMethod)
FCFuncElement("GetNextIntroducedMethod", RuntimeTypeHandle::GetNextIntroducedMethod)
FCFuncElement("GetCorElementType", RuntimeTypeHandle::GetCorElementType)
FCFuncElement("GetAssembly", RuntimeTypeHandle::GetAssembly)
FCFuncElement("GetModule", RuntimeTypeHandle::GetModule)
FCFuncElement("GetBaseType", RuntimeTypeHandle::GetBaseType)
FCFuncElement("GetElementType", RuntimeTypeHandle::GetElementType)
FCFuncElement("GetArrayRank", RuntimeTypeHandle::GetArrayRank)
FCFuncElement("GetToken", RuntimeTypeHandle::GetToken)
FCFuncElement("_GetUtf8Name", RuntimeTypeHandle::GetUtf8Name)
FCFuncElement("GetMethodAt", RuntimeTypeHandle::GetMethodAt)
FCFuncElement("GetFields", RuntimeTypeHandle::GetFields)
FCFuncElement("GetInterfaces", RuntimeTypeHandle::GetInterfaces)
FCFuncElement("GetAttributes", RuntimeTypeHandle::GetAttributes)
FCFuncElement("_GetMetadataImport", RuntimeTypeHandle::GetMetadataImport)
FCFuncElement("GetNumVirtuals", RuntimeTypeHandle::GetNumVirtuals)
FCFuncElement("GetNumVirtualsAndStaticVirtuals", RuntimeTypeHandle::GetNumVirtualsAndStaticVirtuals)
FCFuncElement("IsValueType", RuntimeTypeHandle::IsValueType)
FCFuncElement("IsInterface", RuntimeTypeHandle::IsInterface)
FCFuncElement("IsByRefLike", RuntimeTypeHandle::IsByRefLike)
FCFuncElement("CanCastTo", RuntimeTypeHandle::CanCastTo)
FCFuncElement("HasInstantiation", RuntimeTypeHandle::HasInstantiation)
FCFuncElement("GetGenericVariableIndex", RuntimeTypeHandle::GetGenericVariableIndex)
FCFuncElement("IsGenericVariable", RuntimeTypeHandle::IsGenericVariable)
FCFuncElement("IsGenericTypeDefinition", RuntimeTypeHandle::IsGenericTypeDefinition)
FCFuncElement("ContainsGenericVariables", RuntimeTypeHandle::ContainsGenericVariables)
FCFuncElement("SatisfiesConstraints", RuntimeTypeHandle::SatisfiesConstraints)
#ifdef FEATURE_COMINTEROP
FCFuncElement("AllocateComObject", RuntimeTypeHandle::AllocateComObject)
#endif // FEATURE_COMINTEROP
FCFuncElement("CompareCanonicalHandles", RuntimeTypeHandle::CompareCanonicalHandles)
FCFuncElement("GetValueInternal", RuntimeTypeHandle::GetValueInternal)
FCFuncElement("IsEquivalentTo", RuntimeTypeHandle::IsEquivalentTo)
FCFuncEnd()
FCFuncStart(gMetaDataImport)
FCFuncElement("_GetDefaultValue", MetaDataImport::GetDefaultValue)
FCFuncElement("_GetName", MetaDataImport::GetName)
FCFuncElement("_GetUserString", MetaDataImport::GetUserString)
FCFuncElement("_GetScopeProps", MetaDataImport::GetScopeProps)
FCFuncElement("_GetClassLayout", MetaDataImport::GetClassLayout)
FCFuncElement("_GetSignatureFromToken", MetaDataImport::GetSignatureFromToken)
FCFuncElement("_GetNamespace", MetaDataImport::GetNamespace)
FCFuncElement("_GetEventProps", MetaDataImport::GetEventProps)
FCFuncElement("_GetFieldDefProps", MetaDataImport::GetFieldDefProps)
FCFuncElement("_GetPropertyProps", MetaDataImport::GetPropertyProps)
FCFuncElement("_GetParentToken", MetaDataImport::GetParentToken)
FCFuncElement("_GetParamDefProps", MetaDataImport::GetParamDefProps)
FCFuncElement("_GetGenericParamProps", MetaDataImport::GetGenericParamProps)
FCFuncElement("_Enum", MetaDataImport::Enum)
FCFuncElement("_GetMemberRefProps", MetaDataImport::GetMemberRefProps)
FCFuncElement("_GetCustomAttributeProps", MetaDataImport::GetCustomAttributeProps)
FCFuncElement("_GetFieldOffset", MetaDataImport::GetFieldOffset)
FCFuncElement("_GetSigOfFieldDef", MetaDataImport::GetSigOfFieldDef)
FCFuncElement("_GetSigOfMethodDef", MetaDataImport::GetSigOfMethodDef)
FCFuncElement("_GetFieldMarshal", MetaDataImport::GetFieldMarshal)
FCFuncElement("_GetPInvokeMap", MetaDataImport::GetPinvokeMap)
FCFuncElement("_IsValidToken", MetaDataImport::IsValidToken)
FCFuncElement("_GetMarshalAs", MetaDataImport::GetMarshalAs)
FCFuncEnd()
FCFuncStart(gSignatureNative)
FCFuncElement("GetSignature", SignatureNative::GetSignature)
FCFuncElement("GetCustomModifiers", SignatureNative::GetCustomModifiers)
FCFuncElement("CompareSig", SignatureNative::CompareSig)
FCFuncEnd()
FCFuncStart(gRuntimeMethodHandle)
FCFuncElement("_GetCurrentMethod", RuntimeMethodHandle::GetCurrentMethod)
FCFuncElement("InvokeMethod", RuntimeMethodHandle::InvokeMethod)
FCFuncElement("GetImplAttributes", RuntimeMethodHandle::GetImplAttributes)
FCFuncElement("GetAttributes", RuntimeMethodHandle::GetAttributes)
FCFuncElement("GetDeclaringType", RuntimeMethodHandle::GetDeclaringType)
FCFuncElement("GetSlot", RuntimeMethodHandle::GetSlot)
FCFuncElement("GetMethodDef", RuntimeMethodHandle::GetMethodDef)
FCFuncElement("GetName", RuntimeMethodHandle::GetName)
FCFuncElement("_GetUtf8Name", RuntimeMethodHandle::GetUtf8Name)
FCFuncElement("MatchesNameHash", RuntimeMethodHandle::MatchesNameHash)
FCFuncElement("HasMethodInstantiation", RuntimeMethodHandle::HasMethodInstantiation)
FCFuncElement("IsGenericMethodDefinition", RuntimeMethodHandle::IsGenericMethodDefinition)
FCFuncElement("GetGenericParameterCount", RuntimeMethodHandle::GetGenericParameterCount)
FCFuncElement("IsTypicalMethodDefinition", RuntimeMethodHandle::IsTypicalMethodDefinition)
FCFuncElement("GetStubIfNeeded", RuntimeMethodHandle::GetStubIfNeeded)
FCFuncElement("GetMethodFromCanonical", RuntimeMethodHandle::GetMethodFromCanonical)
FCFuncElement("IsDynamicMethod", RuntimeMethodHandle::IsDynamicMethod)
FCFuncElement("GetMethodBody", RuntimeMethodHandle::GetMethodBody)
FCFuncElement("IsConstructor", RuntimeMethodHandle::IsConstructor)
FCFuncElement("GetResolver", RuntimeMethodHandle::GetResolver)
FCFuncElement("GetLoaderAllocator", RuntimeMethodHandle::GetLoaderAllocator)
FCFuncEnd()
FCFuncStart(gCOMFieldHandleNewFuncs)
FCFuncElement("GetValue", RuntimeFieldHandle::GetValue)
FCFuncElement("SetValue", RuntimeFieldHandle::SetValue)
FCFuncElement("GetValueDirect", RuntimeFieldHandle::GetValueDirect)
FCFuncElement("SetValueDirect", RuntimeFieldHandle::SetValueDirect)
FCFuncElement("GetName", RuntimeFieldHandle::GetName)
FCFuncElement("_GetUtf8Name", RuntimeFieldHandle::GetUtf8Name)
FCFuncElement("MatchesNameHash", RuntimeFieldHandle::MatchesNameHash)
FCFuncElement("GetAttributes", RuntimeFieldHandle::GetAttributes)
FCFuncElement("GetApproxDeclaringType", RuntimeFieldHandle::GetApproxDeclaringType)
FCFuncElement("GetToken", RuntimeFieldHandle::GetToken)
FCFuncElement("GetStaticFieldForGenericType", RuntimeFieldHandle::GetStaticFieldForGenericType)
FCFuncElement("AcquiresContextFromThis", RuntimeFieldHandle::AcquiresContextFromThis)
FCFuncEnd()
FCFuncStart(gCOMModuleFuncs)
FCFuncElement("GetTypes", COMModule::GetTypes)
FCFuncEnd()
FCFuncStart(gCOMModuleHandleFuncs)
FCFuncElement("GetToken", ModuleHandle::GetToken)
FCFuncElement("GetDynamicMethod", ModuleHandle::GetDynamicMethod)
FCFuncElement("_GetMetadataImport", ModuleHandle::GetMetadataImport)
FCFuncElement("GetMDStreamVersion", ModuleHandle::GetMDStreamVersion)
FCFuncEnd()
FCFuncStart(gCustomAttributeEncodedArgument)
FCFuncElement("ParseAttributeArguments", Attribute::ParseAttributeArguments)
FCFuncEnd()
FCFuncStart(gCOMCustomAttributeFuncs)
FCFuncElement("_ParseAttributeUsageAttribute", COMCustomAttribute::ParseAttributeUsageAttribute)
FCFuncElement("_CreateCaObject", COMCustomAttribute::CreateCaObject)
FCFuncElement("_GetPropertyOrFieldData", COMCustomAttribute::GetPropertyOrFieldData)
FCFuncEnd()
FCFuncStart(gCompatibilitySwitchFuncs)
FCFuncElement("GetValueInternal", CompatibilitySwitch::GetValue)
FCFuncEnd()
FCFuncStart(gRuntimeAssemblyFuncs)
FCFuncElement("FCallIsDynamic", AssemblyNative::IsDynamic)
FCFuncElement("GetReferencedAssemblies", AssemblyNative::GetReferencedAssemblies)
FCFuncElement("GetManifestResourceNames", AssemblyNative::GetManifestResourceNames)
FCFuncElement("GetManifestModule", AssemblyHandle::GetManifestModule)
FCFuncElement("GetToken", AssemblyHandle::GetToken)
FCFuncEnd()
FCFuncStart(gAssemblyLoadContextFuncs)
FCFuncElement("GetLoadedAssemblies", AppDomainNative::GetLoadedAssemblies)
FCFuncElement("IsTracingEnabled", AssemblyNative::IsTracingEnabled)
FCFuncEnd()
FCFuncStart(gAssemblyBuilderFuncs)
FCFuncElement("GetInMemoryAssemblyModule", AssemblyNative::GetInMemoryAssemblyModule)
FCFuncEnd()
FCFuncStart(gDelegateFuncs)
FCFuncElement("BindToMethodName", COMDelegate::BindToMethodName)
FCFuncElement("BindToMethodInfo", COMDelegate::BindToMethodInfo)
FCFuncElement("GetMulticastInvoke", COMDelegate::GetMulticastInvoke)
FCFuncElement("GetInvokeMethod", COMDelegate::GetInvokeMethod)
FCFuncElement("InternalAlloc", COMDelegate::InternalAlloc)
FCFuncElement("InternalAllocLike", COMDelegate::InternalAllocLike)
FCFuncElement("InternalEqualTypes", COMDelegate::InternalEqualTypes)
FCFuncElement("InternalEqualMethodHandles", COMDelegate::InternalEqualMethodHandles)
FCFuncElement("FindMethodHandle", COMDelegate::FindMethodHandle)
FCFuncElement("AdjustTarget", COMDelegate::AdjustTarget)
FCFuncElement("GetCallStub", COMDelegate::GetCallStub)
FCFuncElement("CompareUnmanagedFunctionPtrs", COMDelegate::CompareUnmanagedFunctionPtrs)
// The FCall mechanism knows how to wire multiple different constructor calls into a
// single entrypoint, without the following entry. But we need this entry to satisfy
// frame creation within the body:
FCFuncElement("DelegateConstruct", COMDelegate::DelegateConstruct)
FCFuncEnd()
FCFuncStart(gMathFuncs)
FCFuncElement("Acos", COMDouble::Acos)
FCFuncElement("Acosh", COMDouble::Acosh)
FCFuncElement("Asin", COMDouble::Asin)
FCFuncElement("Asinh", COMDouble::Asinh)
FCFuncElement("Atan", COMDouble::Atan)
FCFuncElement("Atanh", COMDouble::Atanh)
FCFuncElement("Atan2", COMDouble::Atan2)
FCFuncElement("Cbrt", COMDouble::Cbrt)
FCFuncElement("Ceiling", COMDouble::Ceil)
FCFuncElement("Cos", COMDouble::Cos)
FCFuncElement("Cosh", COMDouble::Cosh)
FCFuncElement("Exp", COMDouble::Exp)
FCFuncElement("Floor", COMDouble::Floor)
FCFuncElement("FMod", COMDouble::FMod)
FCFuncElement("FusedMultiplyAdd", COMDouble::FusedMultiplyAdd)
FCFuncElement("Log", COMDouble::Log)
FCFuncElement("Log2", COMDouble::Log2)
FCFuncElement("Log10", COMDouble::Log10)
FCFuncElement("ModF", COMDouble::ModF)
FCFuncElement("Pow", COMDouble::Pow)
FCFuncElement("Sin", COMDouble::Sin)
FCFuncElement("SinCos", COMDouble::SinCos)
FCFuncElement("Sinh", COMDouble::Sinh)
FCFuncElement("Sqrt", COMDouble::Sqrt)
FCFuncElement("Tan", COMDouble::Tan)
FCFuncElement("Tanh", COMDouble::Tanh)
FCFuncEnd()
FCFuncStart(gMathFFuncs)
FCFuncElement("Acos", COMSingle::Acos)
FCFuncElement("Acosh", COMSingle::Acosh)
FCFuncElement("Asin", COMSingle::Asin)
FCFuncElement("Asinh", COMSingle::Asinh)
FCFuncElement("Atan", COMSingle::Atan)
FCFuncElement("Atanh", COMSingle::Atanh)
FCFuncElement("Atan2", COMSingle::Atan2)
FCFuncElement("Cbrt", COMSingle::Cbrt)
FCFuncElement("Ceiling", COMSingle::Ceil)
FCFuncElement("Cos", COMSingle::Cos)
FCFuncElement("Cosh", COMSingle::Cosh)
FCFuncElement("Exp", COMSingle::Exp)
FCFuncElement("Floor", COMSingle::Floor)
FCFuncElement("FMod", COMSingle::FMod)
FCFuncElement("FusedMultiplyAdd", COMSingle::FusedMultiplyAdd)
FCFuncElement("Log", COMSingle::Log)
FCFuncElement("Log2", COMSingle::Log2)
FCFuncElement("Log10", COMSingle::Log10)
FCFuncElement("ModF", COMSingle::ModF)
FCFuncElement("Pow", COMSingle::Pow)
FCFuncElement("Sin", COMSingle::Sin)
FCFuncElement("SinCos", COMSingle::SinCos)
FCFuncElement("Sinh", COMSingle::Sinh)
FCFuncElement("Sqrt", COMSingle::Sqrt)
FCFuncElement("Tan", COMSingle::Tan)
FCFuncElement("Tanh", COMSingle::Tanh)
FCFuncEnd()
FCFuncStart(gThreadFuncs)
FCFuncElement("InternalGetCurrentThread", GetThread)
#undef Sleep
FCFuncElement("SleepInternal", ThreadNative::Sleep)
#define Sleep(a) Dont_Use_Sleep(a)
FCFuncElement("Initialize", ThreadNative::Initialize)
FCFuncElement("SpinWaitInternal", ThreadNative::SpinWait)
FCFuncElement("GetCurrentThreadNative", ThreadNative::GetCurrentThread)
FCFuncElement("get_ManagedThreadId", ThreadNative::GetManagedThreadId)
FCFuncElement("InternalFinalize", ThreadNative::Finalize)
FCFuncElement("get_IsAlive", ThreadNative::IsAlive)
FCFuncElement("IsBackgroundNative", ThreadNative::IsBackground)
FCFuncElement("SetBackgroundNative", ThreadNative::SetBackground)
FCFuncElement("get_IsThreadPoolThread", ThreadNative::IsThreadpoolThread)
FCFuncElement("set_IsThreadPoolThread", ThreadNative::SetIsThreadpoolThread)
FCFuncElement("GetPriorityNative", ThreadNative::GetPriority)
FCFuncElement("SetPriorityNative", ThreadNative::SetPriority)
FCFuncElement("GetThreadStateNative", ThreadNative::GetThreadState)
#ifdef FEATURE_COMINTEROP_APARTMENT_SUPPORT
FCFuncElement("GetApartmentStateNative", ThreadNative::GetApartmentState)
FCFuncElement("SetApartmentStateNative", ThreadNative::SetApartmentState)
#endif // FEATURE_COMINTEROP_APARTMENT_SUPPORT
#ifdef FEATURE_COMINTEROP
FCFuncElement("DisableComObjectEagerCleanup", ThreadNative::DisableComObjectEagerCleanup)
#endif // FEATURE_COMINTEROP
FCFuncElement("Interrupt", ThreadNative::Interrupt)
FCFuncElement("Join", ThreadNative::Join)
FCFuncElement("get_OptimalMaxSpinWaitsPerSpinIteration", ThreadNative::GetOptimalMaxSpinWaitsPerSpinIteration)
FCFuncElement("GetCurrentProcessorNumber", ThreadNative::GetCurrentProcessorNumber)
FCFuncEnd()
FCFuncStart(gThreadPoolFuncs)
FCFuncElement("GetNextConfigUInt32Value", ThreadPoolNative::GetNextConfigUInt32Value)
FCFuncElement("PostQueuedCompletionStatus", ThreadPoolNative::CorPostQueuedCompletionStatus)
FCFuncElement("GetAvailableThreadsNative", ThreadPoolNative::CorGetAvailableThreads)
FCFuncElement("CanSetMinIOCompletionThreads", ThreadPoolNative::CorCanSetMinIOCompletionThreads)
FCFuncElement("CanSetMaxIOCompletionThreads", ThreadPoolNative::CorCanSetMaxIOCompletionThreads)
FCFuncElement("SetMinThreadsNative", ThreadPoolNative::CorSetMinThreads)
FCFuncElement("GetMinThreadsNative", ThreadPoolNative::CorGetMinThreads)
FCFuncElement("GetThreadCount", ThreadPoolNative::GetThreadCount)
FCFuncElement("GetPendingUnmanagedWorkItemCount", ThreadPoolNative::GetPendingUnmanagedWorkItemCount)
FCFuncElement("RegisterWaitForSingleObjectNative", ThreadPoolNative::CorRegisterWaitForSingleObject)
FCFuncElement("BindIOCompletionCallbackNative", ThreadPoolNative::CorBindIoCompletionCallback)
FCFuncElement("SetMaxThreadsNative", ThreadPoolNative::CorSetMaxThreads)
FCFuncElement("GetMaxThreadsNative", ThreadPoolNative::CorGetMaxThreads)
FCFuncElement("NotifyWorkItemCompleteNative", ThreadPoolNative::NotifyRequestComplete)
FCFuncElement("NotifyWorkItemProgressNative", ThreadPoolNative::NotifyRequestProgress)
FCFuncElement("GetEnableWorkerTrackingNative", ThreadPoolNative::GetEnableWorkerTracking)
FCFuncElement("ReportThreadStatusNative", ThreadPoolNative::ReportThreadStatus)
FCFuncEnd()
FCFuncStart(gRegisteredWaitHandleFuncs)
FCFuncElement("UnregisterWaitNative", ThreadPoolNative::CorUnregisterWait)
FCFuncElement("WaitHandleCleanupNative", ThreadPoolNative::CorWaitHandleCleanupNative)
FCFuncEnd()
FCFuncStart(gWaitHandleFuncs)
FCFuncElement("WaitOneCore", WaitHandleNative::CorWaitOneNative)
FCFuncElement("WaitMultipleIgnoringSyncContext", WaitHandleNative::CorWaitMultipleNative)
FCFuncElement("SignalAndWaitNative", WaitHandleNative::CorSignalAndWaitOneNative)
FCFuncEnd()
#ifdef FEATURE_COMINTEROP
FCFuncStart(gVariantFuncs)
FCFuncElement("SetFieldsObject", COMVariant::SetFieldsObject)
FCFuncElement("BoxEnum", COMVariant::BoxEnum)
FCFuncEnd()
#endif // FEATURE_COMINTEROP
#ifdef FEATURE_COMINTEROP
FCFuncStart(gOAVariantFuncs)
FCFuncElement("ChangeTypeEx", COMOAVariant::ChangeTypeEx)
FCFuncEnd()
#endif // FEATURE_COMINTEROP
FCFuncStart(gCastHelpers)
FCFuncElement("IsInstanceOfAny_NoCacheLookup", ::IsInstanceOfAny_NoCacheLookup)
FCFuncElement("ChkCastAny_NoCacheLookup", ::ChkCastAny_NoCacheLookup)
FCFuncElement("Unbox_Helper", ::Unbox_Helper)
FCFuncElement("WriteBarrier", ::WriteBarrier_Helper)
FCFuncEnd()
FCFuncStart(gArrayFuncs)
FCFuncElement("GetCorElementTypeOfElementType", ArrayNative::GetCorElementTypeOfElementType)
FCFuncElement("Initialize", ArrayNative::Initialize)
FCFuncElement("IsSimpleCopy", ArrayNative::IsSimpleCopy)
FCFuncElement("CopySlow", ArrayNative::CopySlow)
FCFuncElement("InternalCreate", ArrayNative::CreateInstance)
FCFuncElement("InternalGetValue", ArrayNative::GetValue)
FCFuncElement("InternalSetValue", ArrayNative::SetValue)
FCFuncEnd()
FCFuncStart(gBufferFuncs)
FCFuncElement("__BulkMoveWithWriteBarrier", Buffer::BulkMoveWithWriteBarrier)
FCFuncEnd()
FCFuncStart(gGCInterfaceFuncs)
FCFuncElement("GetGenerationWR", GCInterface::GetGenerationWR)
FCFuncElement("_RegisterForFullGCNotification", GCInterface::RegisterForFullGCNotification)
FCFuncElement("_CancelFullGCNotification", GCInterface::CancelFullGCNotification)
FCFuncElement("_WaitForFullGCApproach", GCInterface::WaitForFullGCApproach)
FCFuncElement("_WaitForFullGCComplete", GCInterface::WaitForFullGCComplete)
FCFuncElement("_CollectionCount", GCInterface::CollectionCount)
FCFuncElement("GetMemoryInfo", GCInterface::GetMemoryInfo)
FCFuncElement("GetMemoryLoad", GCInterface::GetMemoryLoad)
FCFuncElement("GetSegmentSize", GCInterface::GetSegmentSize)
FCFuncElement("GetLastGCPercentTimeInGC", GCInterface::GetLastGCPercentTimeInGC)
FCFuncElement("GetGenerationSize", GCInterface::GetGenerationSize)
FCFuncElement("GetGeneration", GCInterface::GetGeneration)
FCFuncElement("GetMaxGeneration", GCInterface::GetMaxGeneration)
FCFuncElement("_SuppressFinalize", GCInterface::SuppressFinalize)
FCFuncElement("_ReRegisterForFinalize", GCInterface::ReRegisterForFinalize)
FCFuncElement("GetAllocatedBytesForCurrentThread", GCInterface::GetAllocatedBytesForCurrentThread)
FCFuncElement("GetTotalAllocatedBytes", GCInterface::GetTotalAllocatedBytes)
FCFuncElement("AllocateNewArray", GCInterface::AllocateNewArray)
FCFuncEnd()
FCFuncStart(gGCSettingsFuncs)
FCFuncElement("get_IsServerGC", SystemNative::IsServerGC)
FCFuncElement("GetGCLatencyMode", GCInterface::GetGcLatencyMode)
FCFuncElement("GetLOHCompactionMode", GCInterface::GetLOHCompactionMode)
FCFuncElement("SetGCLatencyMode", GCInterface::SetGcLatencyMode)
FCFuncElement("SetLOHCompactionMode", GCInterface::SetLOHCompactionMode)
FCFuncEnd()
FCFuncStart(gInteropMarshalFuncs)
FCFuncElement("GetLastPInvokeError", MarshalNative::GetLastPInvokeError)
FCFuncElement("SetLastPInvokeError", MarshalNative::SetLastPInvokeError)
FCFuncElement("SizeOfHelper", MarshalNative::SizeOfClass)
FCFuncElement("StructureToPtr", MarshalNative::StructureToPtr)
FCFuncElement("PtrToStructureHelper", MarshalNative::PtrToStructureHelper)
FCFuncElement("DestroyStructure", MarshalNative::DestroyStructure)
FCFuncElement("IsPinnable", MarshalNative::IsPinnable)
FCFuncElement("GetExceptionCode", ExceptionNative::GetExceptionCode)
FCFuncElement("GetExceptionPointers", ExceptionNative::GetExceptionPointers)
FCFuncElement("OffsetOfHelper", MarshalNative::OffsetOfHelper)
FCFuncElement("GetExceptionForHRInternal", MarshalNative::GetExceptionForHR)
FCFuncElement("GetDelegateForFunctionPointerInternal", MarshalNative::GetDelegateForFunctionPointerInternal)
FCFuncElement("GetFunctionPointerForDelegateInternal", MarshalNative::GetFunctionPointerForDelegateInternal)
#ifdef FEATURE_COMINTEROP
FCFuncElement("GetHRForException", MarshalNative::GetHRForException)
FCFuncElement("GetObjectForIUnknownNative", MarshalNative::GetObjectForIUnknownNative)
FCFuncElement("GetUniqueObjectForIUnknownNative", MarshalNative::GetUniqueObjectForIUnknownNative)
FCFuncElement("GetNativeVariantForObjectNative", MarshalNative::GetNativeVariantForObjectNative)
FCFuncElement("GetObjectForNativeVariantNative", MarshalNative::GetObjectForNativeVariantNative)
FCFuncElement("InternalFinalReleaseComObject", MarshalNative::FinalReleaseComObject)
FCFuncElement("IsTypeVisibleFromCom", MarshalNative::IsTypeVisibleFromCom)
FCFuncElement("CreateAggregatedObjectNative", MarshalNative::CreateAggregatedObjectNative)
FCFuncElement("AreComObjectsAvailableForCleanup", MarshalNative::AreComObjectsAvailableForCleanup)
FCFuncElement("InternalCreateWrapperOfType", MarshalNative::InternalCreateWrapperOfType)
FCFuncElement("GetObjectsForNativeVariantsNative", MarshalNative::GetObjectsForNativeVariantsNative)
FCFuncElement("GetStartComSlot", MarshalNative::GetStartComSlot)
FCFuncElement("GetEndComSlot", MarshalNative::GetEndComSlot)
FCFuncElement("GetIUnknownForObjectNative", MarshalNative::GetIUnknownForObjectNative)
FCFuncElement("GetIDispatchForObjectNative", MarshalNative::GetIDispatchForObjectNative)
FCFuncElement("GetComInterfaceForObjectNative", MarshalNative::GetComInterfaceForObjectNative)
FCFuncElement("InternalReleaseComObject", MarshalNative::ReleaseComObject)
FCFuncElement("GetTypedObjectForIUnknown", MarshalNative::GetTypedObjectForIUnknown)
FCFuncElement("ChangeWrapperHandleStrength", MarshalNative::ChangeWrapperHandleStrength)
FCFuncElement("CleanupUnusedObjectsInCurrentContext", MarshalNative::CleanupUnusedObjectsInCurrentContext)
#endif // FEATURE_COMINTEROP
FCFuncEnd()
FCFuncStart(gMissingMemberExceptionFuncs)
FCFuncElement("FormatSignature", MissingMemberException_FormatSignature)
FCFuncEnd()
FCFuncStart(gInterlockedFuncs)
FCFuncElementSig("Exchange", &gsig_SM_RefInt_Int_RetInt, COMInterlocked::Exchange)
FCFuncElementSig("Exchange", &gsig_SM_RefLong_Long_RetLong, COMInterlocked::Exchange64)
FCFuncElementSig("Exchange", &gsig_SM_RefDbl_Dbl_RetDbl, COMInterlocked::ExchangeDouble)
FCFuncElementSig("Exchange", &gsig_SM_RefFlt_Flt_RetFlt, COMInterlocked::ExchangeFloat)
FCFuncElementSig("Exchange", &gsig_SM_RefObj_Obj_RetObj, COMInterlocked::ExchangeObject)
FCFuncElementSig("CompareExchange", &gsig_SM_RefInt_Int_Int_RetInt, COMInterlocked::CompareExchange)
FCFuncElementSig("CompareExchange", &gsig_SM_RefLong_Long_Long_RetLong, COMInterlocked::CompareExchange64)
FCFuncElementSig("CompareExchange", &gsig_SM_RefDbl_Dbl_Dbl_RetDbl, COMInterlocked::CompareExchangeDouble)
FCFuncElementSig("CompareExchange", &gsig_SM_RefFlt_Flt_Flt_RetFlt, COMInterlocked::CompareExchangeFloat)
FCFuncElementSig("CompareExchange", &gsig_SM_RefObj_Obj_Obj_RetObj, COMInterlocked::CompareExchangeObject)
FCFuncElementSig("ExchangeAdd", &gsig_SM_RefInt_Int_RetInt, COMInterlocked::ExchangeAdd32)
FCFuncElementSig("ExchangeAdd", &gsig_SM_RefLong_Long_RetLong, COMInterlocked::ExchangeAdd64)
FCFuncElement("MemoryBarrier", COMInterlocked::FCMemoryBarrier)
FCFuncElement("ReadMemoryBarrier", COMInterlocked::FCMemoryBarrierLoad)
FCFuncEnd()
FCFuncStart(gJitInfoFuncs)
FCFuncElement("GetCompiledILBytes", GetCompiledILBytes)
FCFuncElement("GetCompiledMethodCount", GetCompiledMethodCount)
FCFuncElement("GetCompilationTimeInTicks", GetCompilationTimeInTicks)
FCFuncEnd()
FCFuncStart(gVarArgFuncs)
FCFuncElementSig(COR_CTOR_METHOD_NAME, &gsig_IM_IntPtr_PtrVoid_RetVoid, VarArgsNative::Init2)
FCFuncElementSig(COR_CTOR_METHOD_NAME, &gsig_IM_IntPtr_RetVoid, VarArgsNative::Init)
FCFuncElement("GetRemainingCount", VarArgsNative::GetRemainingCount)
FCFuncElement("_GetNextArgType", VarArgsNative::GetNextArgType)
FCFuncElement("FCallGetNextArg", VarArgsNative::DoGetNextArg)
FCFuncElement("InternalGetNextArg", VarArgsNative::GetNextArg2)
FCFuncEnd()
FCFuncStart(gMonitorFuncs)
FCFuncElement("Enter", JIT_MonEnter)
FCFuncElement("ReliableEnter", JIT_MonReliableEnter)
FCFuncElement("ReliableEnterTimeout", JIT_MonTryEnter)
FCFuncElement("Exit", JIT_MonExit)
FCFuncElement("ObjWait", ObjectNative::WaitTimeout)
FCFuncElement("ObjPulse", ObjectNative::Pulse)
FCFuncElement("ObjPulseAll", ObjectNative::PulseAll)
FCFuncElement("IsEnteredNative", ObjectNative::IsLockHeld)
FCFuncEnd()
FCFuncStart(gOverlappedFuncs)
FCFuncElement("AllocateNativeOverlapped", AllocateNativeOverlapped)
FCFuncElement("FreeNativeOverlapped", FreeNativeOverlapped)
FCFuncElement("CheckVMForIOPacket", CheckVMForIOPacket)
FCFuncElement("GetOverlappedFromNative", GetOverlappedFromNative)
FCFuncEnd()
FCFuncStart(gRuntimeHelpers)
FCFuncElement("GetObjectValue", ObjectNative::GetObjectValue)
FCFuncElement("InitializeArray", ArrayNative::InitializeArray)
FCFuncElement("GetSpanDataFrom", ArrayNative::GetSpanDataFrom)
FCFuncElement("PrepareDelegate", ReflectionInvocation::PrepareDelegate)
FCFuncElement("GetHashCode", ObjectNative::GetHashCode)
FCFuncElement("Equals", ObjectNative::Equals)
FCFuncElement("AllocateUninitializedClone", ObjectNative::AllocateUninitializedClone)
FCFuncElement("EnsureSufficientExecutionStack", ReflectionInvocation::EnsureSufficientExecutionStack)
FCFuncElement("TryEnsureSufficientExecutionStack", ReflectionInvocation::TryEnsureSufficientExecutionStack)
FCFuncElement("AllocTailCallArgBuffer", TailCallHelp::AllocTailCallArgBuffer)
FCFuncElement("GetTailCallInfo", TailCallHelp::GetTailCallInfo)
FCFuncEnd()
FCFuncStart(gMngdFixedArrayMarshalerFuncs)
FCFuncElement("CreateMarshaler", MngdFixedArrayMarshaler::CreateMarshaler)
FCFuncElement("ConvertSpaceToNative", MngdFixedArrayMarshaler::ConvertSpaceToNative)
FCFuncElement("ConvertContentsToNative", MngdFixedArrayMarshaler::ConvertContentsToNative)
FCFuncElement("ConvertSpaceToManaged", MngdFixedArrayMarshaler::ConvertSpaceToManaged)
FCFuncElement("ConvertContentsToManaged", MngdFixedArrayMarshaler::ConvertContentsToManaged)
FCFuncElement("ClearNativeContents", MngdFixedArrayMarshaler::ClearNativeContents)
FCFuncEnd()
FCFuncStart(gMngdNativeArrayMarshalerFuncs)
FCFuncElement("CreateMarshaler", MngdNativeArrayMarshaler::CreateMarshaler)
FCFuncElement("ConvertSpaceToNative", MngdNativeArrayMarshaler::ConvertSpaceToNative)
FCFuncElement("ConvertContentsToNative", MngdNativeArrayMarshaler::ConvertContentsToNative)
FCFuncElement("ConvertSpaceToManaged", MngdNativeArrayMarshaler::ConvertSpaceToManaged)
FCFuncElement("ConvertContentsToManaged", MngdNativeArrayMarshaler::ConvertContentsToManaged)
FCFuncElement("ClearNative", MngdNativeArrayMarshaler::ClearNative)
FCFuncElement("ClearNativeContents", MngdNativeArrayMarshaler::ClearNativeContents)
FCFuncEnd()
#ifdef FEATURE_COMINTEROP
FCFuncStart(gObjectMarshalerFuncs)
FCFuncElement("ConvertToNative", StubHelpers::ObjectMarshaler__ConvertToNative)
FCFuncElement("ConvertToManaged", StubHelpers::ObjectMarshaler__ConvertToManaged)
FCFuncElement("ClearNative", StubHelpers::ObjectMarshaler__ClearNative)
FCFuncEnd()
FCFuncStart(gInterfaceMarshalerFuncs)
FCFuncElement("ConvertToNative", StubHelpers::InterfaceMarshaler__ConvertToNative)
FCFuncElement("ConvertToManaged", StubHelpers::InterfaceMarshaler__ConvertToManaged)
FCFuncEnd()
FCFuncStart(gMngdSafeArrayMarshalerFuncs)
FCFuncElement("CreateMarshaler", MngdSafeArrayMarshaler::CreateMarshaler)
FCFuncElement("ConvertSpaceToNative", MngdSafeArrayMarshaler::ConvertSpaceToNative)
FCFuncElement("ConvertContentsToNative", MngdSafeArrayMarshaler::ConvertContentsToNative)
FCFuncElement("ConvertSpaceToManaged", MngdSafeArrayMarshaler::ConvertSpaceToManaged)
FCFuncElement("ConvertContentsToManaged", MngdSafeArrayMarshaler::ConvertContentsToManaged)
FCFuncElement("ClearNative", MngdSafeArrayMarshaler::ClearNative)
FCFuncEnd()
#endif // FEATURE_COMINTEROP
FCFuncStart(gMngdRefCustomMarshalerFuncs)
FCFuncElement("CreateMarshaler", MngdRefCustomMarshaler::CreateMarshaler)
FCFuncElement("ConvertContentsToNative", MngdRefCustomMarshaler::ConvertContentsToNative)
FCFuncElement("ConvertContentsToManaged", MngdRefCustomMarshaler::ConvertContentsToManaged)
FCFuncElement("ClearNative", MngdRefCustomMarshaler::ClearNative)
FCFuncElement("ClearManaged", MngdRefCustomMarshaler::ClearManaged)
FCFuncEnd()
FCFuncStart(gStubHelperFuncs)
FCFuncElement("GetNDirectTarget", StubHelpers::GetNDirectTarget)
FCFuncElement("GetDelegateTarget", StubHelpers::GetDelegateTarget)
FCFuncElement("SetLastError", StubHelpers::SetLastError)
FCFuncElement("ClearLastError", StubHelpers::ClearLastError)
FCFuncElement("ThrowInteropParamException", StubHelpers::ThrowInteropParamException)
FCFuncElement("InternalGetHRExceptionObject", StubHelpers::GetHRExceptionObject)
#ifdef FEATURE_COMINTEROP
FCFuncElement("InternalGetCOMHRExceptionObject", StubHelpers::GetCOMHRExceptionObject)
FCFuncElement("GetCOMIPFromRCW", StubHelpers::GetCOMIPFromRCW)
#endif // FEATURE_COMINTEROP
#ifdef PROFILING_SUPPORTED
FCFuncElement("ProfilerBeginTransitionCallback", StubHelpers::ProfilerBeginTransitionCallback)
FCFuncElement("ProfilerEndTransitionCallback", StubHelpers::ProfilerEndTransitionCallback)
#endif
FCFuncElement("CreateCustomMarshalerHelper", StubHelpers::CreateCustomMarshalerHelper)
FCFuncElement("FmtClassUpdateNativeInternal", StubHelpers::FmtClassUpdateNativeInternal)
FCFuncElement("FmtClassUpdateCLRInternal", StubHelpers::FmtClassUpdateCLRInternal)
FCFuncElement("LayoutDestroyNativeInternal", StubHelpers::LayoutDestroyNativeInternal)
FCFuncElement("AllocateInternal", StubHelpers::AllocateInternal)
FCFuncElement("MarshalToUnmanagedVaListInternal", StubHelpers::MarshalToUnmanagedVaListInternal)
FCFuncElement("MarshalToManagedVaListInternal", StubHelpers::MarshalToManagedVaListInternal)
FCFuncElement("CalcVaListSize", StubHelpers::CalcVaListSize)
FCFuncElement("ValidateObject", StubHelpers::ValidateObject)
FCFuncElement("ValidateByref", StubHelpers::ValidateByref)
FCFuncElement("LogPinnedArgument", StubHelpers::LogPinnedArgument)
FCFuncElement("GetStubContext", StubHelpers::GetStubContext)
#ifdef FEATURE_ARRAYSTUB_AS_IL
FCFuncElement("ArrayTypeCheck", StubHelpers::ArrayTypeCheck)
#endif //FEATURE_ARRAYSTUB_AS_IL
#ifdef FEATURE_MULTICASTSTUB_AS_IL
FCFuncElement("MulticastDebuggerTraceHelper", StubHelpers::MulticastDebuggerTraceHelper)
#endif //FEATURE_MULTICASTSTUB_AS_IL
FCFuncElement("NextCallReturnAddress", StubHelpers::NextCallReturnAddress)
FCFuncEnd()
FCFuncStart(gGCHandleFuncs)
FCFuncElement("InternalAlloc", MarshalNative::GCHandleInternalAlloc)
FCFuncElement("InternalFree", MarshalNative::GCHandleInternalFree)
FCFuncElement("InternalGet", MarshalNative::GCHandleInternalGet)
FCFuncElement("InternalSet", MarshalNative::GCHandleInternalSet)
FCFuncElement("InternalCompareExchange", MarshalNative::GCHandleInternalCompareExchange)
FCFuncEnd()
FCFuncStart(gStreamFuncs)
FCFuncElement("HasOverriddenBeginEndRead", StreamNative::HasOverriddenBeginEndRead)
FCFuncElement("HasOverriddenBeginEndWrite", StreamNative::HasOverriddenBeginEndWrite)
FCFuncEnd()
FCFuncStart(gWeakReferenceFuncs)
FCFuncElement("Create", WeakReferenceNative::Create)
FCFuncElement("Finalize", WeakReferenceNative::Finalize)
FCFuncElement("get_Target", WeakReferenceNative::GetTarget)
FCFuncElement("set_Target", WeakReferenceNative::SetTarget)
FCFuncElement("get_IsAlive", WeakReferenceNative::IsAlive)
FCFuncElement("IsTrackResurrection", WeakReferenceNative::IsTrackResurrection)
FCFuncEnd()
FCFuncStart(gWeakReferenceOfTFuncs)
FCFuncElement("Create", WeakReferenceOfTNative::Create)
FCFuncElement("Finalize", WeakReferenceOfTNative::Finalize)
FCFuncElement("get_Target", WeakReferenceOfTNative::GetTarget)
FCFuncElement("set_Target", WeakReferenceOfTNative::SetTarget)
FCFuncElement("IsTrackResurrection", WeakReferenceOfTNative::IsTrackResurrection)
FCFuncEnd()
#ifdef FEATURE_COMINTEROP
//
// ECall helpers for the standard managed interfaces.
//
#define MNGSTDITF_BEGIN_INTERFACE(FriendlyName, strMngItfName, strUCOMMngItfName, strCustomMarshalerName, strCustomMarshalerCookie, strManagedViewName, NativeItfIID, bCanCastOnNativeItfQI) \
FCFuncStart(g##FriendlyName##Funcs)
#define MNGSTDITF_DEFINE_METH_IMPL(FriendlyName, FCallMethName, MethName, MethSig, FcallDecl) \
FCUnreferenced FCFuncElementSig(#MethName, MethSig, FriendlyName::FCallMethName)
#define MNGSTDITF_END_INTERFACE(FriendlyName) \
FCFuncEnd()
#include "mngstditflist.h"
#undef MNGSTDITF_BEGIN_INTERFACE
#undef MNGSTDITF_DEFINE_METH_IMPL
#undef MNGSTDITF_END_INTERFACE
#endif // FEATURE_COMINTEROP
//
//
// Class definitions
//
//
// Note these have to remain sorted by name:namespace pair (Assert will whack you if you don't)
// The sorting is case-sensitive
FCClassElement("ArgIterator", "System", gVarArgFuncs)
FCClassElement("Array", "System", gArrayFuncs)
FCClassElement("AssemblyBuilder", "System.Reflection.Emit", gAssemblyBuilderFuncs)
FCClassElement("AssemblyLoadContext", "System.Runtime.Loader", gAssemblyLoadContextFuncs)
FCClassElement("Buffer", "System", gBufferFuncs)
FCClassElement("CastHelpers", "System.Runtime.CompilerServices", gCastHelpers)
FCClassElement("CompatibilitySwitch", "System.Runtime.Versioning", gCompatibilitySwitchFuncs)
FCClassElement("CustomAttribute", "System.Reflection", gCOMCustomAttributeFuncs)
FCClassElement("CustomAttributeEncodedArgument", "System.Reflection", gCustomAttributeEncodedArgument)
FCClassElement("Debugger", "System.Diagnostics", gDiagnosticsDebugger)
FCClassElement("Delegate", "System", gDelegateFuncs)
FCClassElement("DependentHandle", "System.Runtime", gDependentHandleFuncs)
FCClassElement("Enum", "System", gEnumFuncs)
FCClassElement("Environment", "System", gEnvironmentFuncs)
FCClassElement("Exception", "System", gExceptionFuncs)
FCClassElement("GC", "System", gGCInterfaceFuncs)
FCClassElement("GCHandle", "System.Runtime.InteropServices", gGCHandleFuncs)
FCClassElement("GCSettings", "System.Runtime", gGCSettingsFuncs)
#ifdef FEATURE_COMINTEROP
FCClassElement("IEnumerable", "System.Collections", gStdMngIEnumerableFuncs)
FCClassElement("IEnumerator", "System.Collections", gStdMngIEnumeratorFuncs)
FCClassElement("IReflect", "System.Reflection", gStdMngIReflectFuncs)
FCClassElement("InterfaceMarshaler", "System.StubHelpers", gInterfaceMarshalerFuncs)
#endif
FCClassElement("Interlocked", "System.Threading", gInterlockedFuncs)
FCClassElement("JitInfo", "System.Runtime", gJitInfoFuncs)
FCClassElement("Marshal", "System.Runtime.InteropServices", gInteropMarshalFuncs)
FCClassElement("Math", "System", gMathFuncs)
FCClassElement("MathF", "System", gMathFFuncs)
FCClassElement("MetadataImport", "System.Reflection", gMetaDataImport)
FCClassElement("MissingMemberException", "System", gMissingMemberExceptionFuncs)
FCClassElement("MngdFixedArrayMarshaler", "System.StubHelpers", gMngdFixedArrayMarshalerFuncs)
FCClassElement("MngdNativeArrayMarshaler", "System.StubHelpers", gMngdNativeArrayMarshalerFuncs)
FCClassElement("MngdRefCustomMarshaler", "System.StubHelpers", gMngdRefCustomMarshalerFuncs)
#ifdef FEATURE_COMINTEROP
FCClassElement("MngdSafeArrayMarshaler", "System.StubHelpers", gMngdSafeArrayMarshalerFuncs)
#endif // FEATURE_COMINTEROP
FCClassElement("ModuleHandle", "System", gCOMModuleHandleFuncs)
FCClassElement("Monitor", "System.Threading", gMonitorFuncs)
#ifdef FEATURE_COMINTEROP
FCClassElement("OAVariantLib", "Microsoft.Win32", gOAVariantFuncs)
#endif
FCClassElement("Object", "System", gObjectFuncs)
#ifdef FEATURE_COMINTEROP
FCClassElement("ObjectMarshaler", "System.StubHelpers", gObjectMarshalerFuncs)
#endif
FCClassElement("OverlappedData", "System.Threading", gOverlappedFuncs)
FCClassElement("RegisteredWaitHandle", "System.Threading", gRegisteredWaitHandleFuncs)
FCClassElement("RuntimeAssembly", "System.Reflection", gRuntimeAssemblyFuncs)
FCClassElement("RuntimeFieldHandle", "System", gCOMFieldHandleNewFuncs)
FCClassElement("RuntimeHelpers", "System.Runtime.CompilerServices", gRuntimeHelpers)
FCClassElement("RuntimeMethodHandle", "System", gRuntimeMethodHandle)
FCClassElement("RuntimeModule", "System.Reflection", gCOMModuleFuncs)
FCClassElement("RuntimeType", "System", gSystem_RuntimeType)
FCClassElement("RuntimeTypeHandle", "System", gCOMTypeHandleFuncs)
FCClassElement("Signature", "System", gSignatureNative)
FCClassElement("StackTrace", "System.Diagnostics", gDiagnosticsStackTrace)
FCClassElement("Stream", "System.IO", gStreamFuncs)
FCClassElement("String", "System", gStringFuncs)
FCClassElement("StubHelpers", "System.StubHelpers", gStubHelperFuncs)
FCClassElement("Thread", "System.Threading", gThreadFuncs)
FCClassElement("ThreadPool", "System.Threading", gThreadPoolFuncs)
FCClassElement("Type", "System", gSystem_Type)
FCClassElement("TypedReference", "System", gTypedReferenceFuncs)
FCClassElement("ValueType", "System", gValueTypeFuncs)
#ifdef FEATURE_COMINTEROP
FCClassElement("Variant", "System", gVariantFuncs)
#endif
FCClassElement("WaitHandle", "System.Threading", gWaitHandleFuncs)
FCClassElement("WeakReference", "System", gWeakReferenceFuncs)
FCClassElement("WeakReference`1", "System", gWeakReferenceOfTFuncs)
#undef FCFuncElement
#undef FCFuncElementSig
#undef FCDynamic
#undef FCDynamicSig
#undef FCUnreferenced
#undef FCFuncStart
#undef FCFuncEnd
#undef FCClassElement
| // Licensed to the .NET Foundation under one or more agreements.
// The .NET Foundation licenses this file to you under the MIT license.
// ECallList.H
//
// This file contains definitions of FCall entrypoints
//
#ifndef FCFuncElement
#define FCFuncElement(name, impl)
#endif
#ifndef FCFuncElementSig
#define FCFuncElementSig(name,sig,impl)
#endif
#ifndef FCDynamic
#define FCDynamic(name,dynamicID)
#endif
#ifndef FCDynamicSig
#define FCDynamicSig(name,sig,dynamicID)
#endif
#ifndef FCUnreferenced
#define FCUnreferenced
#endif
#ifndef FCFuncStart
#define FCFuncStart(name)
#endif
#ifndef FCFuncEnd
#define FCFuncEnd()
#endif
#ifndef FCClassElement
#define FCClassElement(name,namespace,funcs)
#endif
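// Any of the macros above that the includer does not define expand to nothing, so this file can be
// included with only the subset of macros that a particular consumer needs.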
//
//
// Entrypoint definitions
//
//
FCFuncStart(gDependentHandleFuncs)
FCFuncElement("InternalInitialize", DependentHandle::InternalInitialize)
FCFuncElement("InternalGetTarget", DependentHandle::InternalGetTarget)
FCFuncElement("InternalGetDependent", DependentHandle::InternalGetDependent)
FCFuncElement("InternalGetTargetAndDependent", DependentHandle::InternalGetTargetAndDependent)
FCFuncElement("InternalSetTargetToNull", DependentHandle::InternalSetTargetToNull)
FCFuncElement("InternalSetDependent", DependentHandle::InternalSetDependent)
FCFuncElement("InternalFree", DependentHandle::InternalFree)
FCFuncEnd()
FCFuncStart(gEnumFuncs)
FCFuncElement("InternalGetUnderlyingType", ReflectionEnum::InternalGetEnumUnderlyingType)
FCFuncElement("InternalGetCorElementType", ReflectionEnum::InternalGetCorElementType)
FCFuncElement("InternalBoxEnum", ReflectionEnum::InternalBoxEnum)
FCFuncEnd()
FCFuncStart(gObjectFuncs)
FCFuncElement("GetType", ObjectNative::GetClass)
FCFuncEnd()
FCFuncStart(gStringFuncs)
FCDynamic("FastAllocateString", ECall::FastAllocateString)
FCDynamicSig(COR_CTOR_METHOD_NAME, &gsig_IM_ArrChar_RetVoid, ECall::CtorCharArrayManaged)
FCDynamicSig(COR_CTOR_METHOD_NAME, &gsig_IM_ArrChar_Int_Int_RetVoid, ECall::CtorCharArrayStartLengthManaged)
FCDynamicSig(COR_CTOR_METHOD_NAME, &gsig_IM_PtrChar_RetVoid, ECall::CtorCharPtrManaged)
FCDynamicSig(COR_CTOR_METHOD_NAME, &gsig_IM_PtrChar_Int_Int_RetVoid, ECall::CtorCharPtrStartLengthManaged)
FCDynamicSig(COR_CTOR_METHOD_NAME, &gsig_IM_Char_Int_RetVoid, ECall::CtorCharCountManaged)
FCDynamicSig(COR_CTOR_METHOD_NAME, &gsig_IM_ReadOnlySpanOfChar_RetVoid, ECall::CtorReadOnlySpanOfCharManaged)
FCDynamicSig(COR_CTOR_METHOD_NAME, &gsig_IM_PtrSByt_RetVoid, ECall::CtorSBytePtrManaged)
FCDynamicSig(COR_CTOR_METHOD_NAME, &gsig_IM_PtrSByt_Int_Int_RetVoid, ECall::CtorSBytePtrStartLengthManaged)
FCDynamicSig(COR_CTOR_METHOD_NAME, &gsig_IM_PtrSByt_Int_Int_Encoding_RetVoid, ECall::CtorSBytePtrStartLengthEncodingManaged)
FCFuncElement("SetTrailByte", COMString::FCSetTrailByte)
FCFuncElement("TryGetTrailByte", COMString::FCTryGetTrailByte)
FCFuncElement("IsInterned", AppDomainNative::IsStringInterned)
FCFuncElement("Intern", AppDomainNative::GetOrInternString)
FCFuncEnd()
FCFuncStart(gValueTypeFuncs)
FCFuncElement("CanCompareBits", ValueTypeHelper::CanCompareBits)
FCFuncElement("FastEqualsCheck", ValueTypeHelper::FastEqualsCheck)
FCFuncElement("GetHashCode", ValueTypeHelper::GetHashCode)
FCFuncElement("GetHashCodeOfPtr", ValueTypeHelper::GetHashCodeOfPtr)
FCFuncEnd()
FCFuncStart(gDiagnosticsDebugger)
FCFuncElement("BreakInternal", DebugDebugger::Break)
FCFuncElement("get_IsAttached", DebugDebugger::IsDebuggerAttached)
FCFuncElement("IsLogging", DebugDebugger::IsLogging)
FCFuncElement("CustomNotification", DebugDebugger::CustomNotification)
FCFuncEnd()
FCFuncStart(gDiagnosticsStackTrace)
FCFuncElement("GetStackFramesInternal", DebugStackTrace::GetStackFramesInternal)
FCFuncEnd()
FCFuncStart(gEnvironmentFuncs)
FCFuncElement("get_CurrentManagedThreadId", JIT_GetCurrentManagedThreadId)
FCFuncElement("get_TickCount", SystemNative::GetTickCount)
FCFuncElement("get_TickCount64", SystemNative::GetTickCount64)
FCFuncElement("set_ExitCode", SystemNative::SetExitCode)
FCFuncElement("get_ExitCode", SystemNative::GetExitCode)
FCFuncElement("GetCommandLineArgsNative", SystemNative::GetCommandLineArgs)
FCFuncElementSig("FailFast", &gsig_SM_Str_RetVoid, SystemNative::FailFast)
FCFuncElementSig("FailFast", &gsig_SM_Str_Exception_RetVoid, SystemNative::FailFastWithException)
FCFuncElementSig("FailFast", &gsig_SM_Str_Exception_Str_RetVoid, SystemNative::FailFastWithExceptionAndSource)
FCFuncEnd()
FCFuncStart(gExceptionFuncs)
FCFuncElement("IsImmutableAgileException", ExceptionNative::IsImmutableAgileException)
FCFuncElement("GetMethodFromStackTrace", SystemNative::GetMethodFromStackTrace)
FCFuncElement("PrepareForForeignExceptionRaise", ExceptionNative::PrepareForForeignExceptionRaise)
FCFuncElement("GetStackTracesDeepCopy", ExceptionNative::GetStackTracesDeepCopy)
FCFuncElement("SaveStackTracesFromDeepCopy", ExceptionNative::SaveStackTracesFromDeepCopy)
FCFuncElement("GetExceptionCount", ExceptionNative::GetExceptionCount)
FCFuncEnd()
FCFuncStart(gTypedReferenceFuncs)
FCFuncElement("InternalToObject", ReflectionInvocation::TypedReferenceToObject)
FCFuncElement("InternalMakeTypedReference", ReflectionInvocation::MakeTypedReference)
FCFuncEnd()
FCFuncStart(gSystem_Type)
FCFuncElement("GetTypeFromHandle", RuntimeTypeHandle::GetTypeFromHandle)
FCFuncElement("GetTypeFromHandleUnsafe", RuntimeTypeHandle::GetRuntimeType)
FCFuncEnd()
FCFuncStart(gSystem_RuntimeType)
FCFuncElement("GetGUID", ReflectionInvocation::GetGUID)
FCFuncElement("_CreateEnum", ReflectionInvocation::CreateEnum)
FCFuncElement("CanValueSpecialCast", ReflectionInvocation::CanValueSpecialCast)
FCFuncElement("AllocateValueType", ReflectionInvocation::AllocateValueType)
#if defined(FEATURE_COMINTEROP)
FCFuncElement("InvokeDispMethod", ReflectionInvocation::InvokeDispMethod)
#endif // defined(FEATURE_COMINTEROP)
FCFuncEnd()
FCFuncStart(gCOMTypeHandleFuncs)
FCFuncElement("IsInstanceOfType", RuntimeTypeHandle::IsInstanceOfType)
FCFuncElement("GetDeclaringMethod", RuntimeTypeHandle::GetDeclaringMethod)
FCFuncElement("GetDeclaringType", RuntimeTypeHandle::GetDeclaringType)
FCFuncElement("GetFirstIntroducedMethod", RuntimeTypeHandle::GetFirstIntroducedMethod)
FCFuncElement("GetNextIntroducedMethod", RuntimeTypeHandle::GetNextIntroducedMethod)
FCFuncElement("GetCorElementType", RuntimeTypeHandle::GetCorElementType)
FCFuncElement("GetAssembly", RuntimeTypeHandle::GetAssembly)
FCFuncElement("GetModule", RuntimeTypeHandle::GetModule)
FCFuncElement("GetBaseType", RuntimeTypeHandle::GetBaseType)
FCFuncElement("GetElementType", RuntimeTypeHandle::GetElementType)
FCFuncElement("GetArrayRank", RuntimeTypeHandle::GetArrayRank)
FCFuncElement("GetToken", RuntimeTypeHandle::GetToken)
FCFuncElement("_GetUtf8Name", RuntimeTypeHandle::GetUtf8Name)
FCFuncElement("GetMethodAt", RuntimeTypeHandle::GetMethodAt)
FCFuncElement("GetFields", RuntimeTypeHandle::GetFields)
FCFuncElement("GetInterfaces", RuntimeTypeHandle::GetInterfaces)
FCFuncElement("GetAttributes", RuntimeTypeHandle::GetAttributes)
FCFuncElement("_GetMetadataImport", RuntimeTypeHandle::GetMetadataImport)
FCFuncElement("GetNumVirtuals", RuntimeTypeHandle::GetNumVirtuals)
FCFuncElement("GetNumVirtualsAndStaticVirtuals", RuntimeTypeHandle::GetNumVirtualsAndStaticVirtuals)
FCFuncElement("IsValueType", RuntimeTypeHandle::IsValueType)
FCFuncElement("IsInterface", RuntimeTypeHandle::IsInterface)
FCFuncElement("IsByRefLike", RuntimeTypeHandle::IsByRefLike)
FCFuncElement("CanCastTo", RuntimeTypeHandle::CanCastTo)
FCFuncElement("HasInstantiation", RuntimeTypeHandle::HasInstantiation)
FCFuncElement("GetGenericVariableIndex", RuntimeTypeHandle::GetGenericVariableIndex)
FCFuncElement("IsGenericVariable", RuntimeTypeHandle::IsGenericVariable)
FCFuncElement("IsGenericTypeDefinition", RuntimeTypeHandle::IsGenericTypeDefinition)
FCFuncElement("ContainsGenericVariables", RuntimeTypeHandle::ContainsGenericVariables)
FCFuncElement("SatisfiesConstraints", RuntimeTypeHandle::SatisfiesConstraints)
#ifdef FEATURE_COMINTEROP
FCFuncElement("AllocateComObject", RuntimeTypeHandle::AllocateComObject)
#endif // FEATURE_COMINTEROP
FCFuncElement("CompareCanonicalHandles", RuntimeTypeHandle::CompareCanonicalHandles)
FCFuncElement("GetValueInternal", RuntimeTypeHandle::GetValueInternal)
FCFuncElement("IsEquivalentTo", RuntimeTypeHandle::IsEquivalentTo)
FCFuncEnd()
FCFuncStart(gMetaDataImport)
FCFuncElement("_GetDefaultValue", MetaDataImport::GetDefaultValue)
FCFuncElement("_GetName", MetaDataImport::GetName)
FCFuncElement("_GetUserString", MetaDataImport::GetUserString)
FCFuncElement("_GetScopeProps", MetaDataImport::GetScopeProps)
FCFuncElement("_GetClassLayout", MetaDataImport::GetClassLayout)
FCFuncElement("_GetSignatureFromToken", MetaDataImport::GetSignatureFromToken)
FCFuncElement("_GetNamespace", MetaDataImport::GetNamespace)
FCFuncElement("_GetEventProps", MetaDataImport::GetEventProps)
FCFuncElement("_GetFieldDefProps", MetaDataImport::GetFieldDefProps)
FCFuncElement("_GetPropertyProps", MetaDataImport::GetPropertyProps)
FCFuncElement("_GetParentToken", MetaDataImport::GetParentToken)
FCFuncElement("_GetParamDefProps", MetaDataImport::GetParamDefProps)
FCFuncElement("_GetGenericParamProps", MetaDataImport::GetGenericParamProps)
FCFuncElement("_Enum", MetaDataImport::Enum)
FCFuncElement("_GetMemberRefProps", MetaDataImport::GetMemberRefProps)
FCFuncElement("_GetCustomAttributeProps", MetaDataImport::GetCustomAttributeProps)
FCFuncElement("_GetFieldOffset", MetaDataImport::GetFieldOffset)
FCFuncElement("_GetSigOfFieldDef", MetaDataImport::GetSigOfFieldDef)
FCFuncElement("_GetSigOfMethodDef", MetaDataImport::GetSigOfMethodDef)
FCFuncElement("_GetFieldMarshal", MetaDataImport::GetFieldMarshal)
FCFuncElement("_GetPInvokeMap", MetaDataImport::GetPinvokeMap)
FCFuncElement("_IsValidToken", MetaDataImport::IsValidToken)
FCFuncElement("_GetMarshalAs", MetaDataImport::GetMarshalAs)
FCFuncEnd()
FCFuncStart(gSignatureNative)
FCFuncElement("GetSignature", SignatureNative::GetSignature)
FCFuncElement("GetCustomModifiers", SignatureNative::GetCustomModifiers)
FCFuncElement("CompareSig", SignatureNative::CompareSig)
FCFuncEnd()
FCFuncStart(gRuntimeMethodHandle)
FCFuncElement("_GetCurrentMethod", RuntimeMethodHandle::GetCurrentMethod)
FCFuncElement("InvokeMethod", RuntimeMethodHandle::InvokeMethod)
FCFuncElement("GetImplAttributes", RuntimeMethodHandle::GetImplAttributes)
FCFuncElement("GetAttributes", RuntimeMethodHandle::GetAttributes)
FCFuncElement("GetDeclaringType", RuntimeMethodHandle::GetDeclaringType)
FCFuncElement("GetSlot", RuntimeMethodHandle::GetSlot)
FCFuncElement("GetMethodDef", RuntimeMethodHandle::GetMethodDef)
FCFuncElement("GetName", RuntimeMethodHandle::GetName)
FCFuncElement("_GetUtf8Name", RuntimeMethodHandle::GetUtf8Name)
FCFuncElement("MatchesNameHash", RuntimeMethodHandle::MatchesNameHash)
FCFuncElement("HasMethodInstantiation", RuntimeMethodHandle::HasMethodInstantiation)
FCFuncElement("IsGenericMethodDefinition", RuntimeMethodHandle::IsGenericMethodDefinition)
FCFuncElement("GetGenericParameterCount", RuntimeMethodHandle::GetGenericParameterCount)
FCFuncElement("IsTypicalMethodDefinition", RuntimeMethodHandle::IsTypicalMethodDefinition)
FCFuncElement("GetStubIfNeeded", RuntimeMethodHandle::GetStubIfNeeded)
FCFuncElement("GetMethodFromCanonical", RuntimeMethodHandle::GetMethodFromCanonical)
FCFuncElement("IsDynamicMethod", RuntimeMethodHandle::IsDynamicMethod)
FCFuncElement("GetMethodBody", RuntimeMethodHandle::GetMethodBody)
FCFuncElement("IsConstructor", RuntimeMethodHandle::IsConstructor)
FCFuncElement("GetResolver", RuntimeMethodHandle::GetResolver)
FCFuncElement("GetLoaderAllocator", RuntimeMethodHandle::GetLoaderAllocator)
FCFuncEnd()
FCFuncStart(gCOMFieldHandleNewFuncs)
FCFuncElement("GetValue", RuntimeFieldHandle::GetValue)
FCFuncElement("SetValue", RuntimeFieldHandle::SetValue)
FCFuncElement("GetValueDirect", RuntimeFieldHandle::GetValueDirect)
FCFuncElement("SetValueDirect", RuntimeFieldHandle::SetValueDirect)
FCFuncElement("GetName", RuntimeFieldHandle::GetName)
FCFuncElement("_GetUtf8Name", RuntimeFieldHandle::GetUtf8Name)
FCFuncElement("MatchesNameHash", RuntimeFieldHandle::MatchesNameHash)
FCFuncElement("GetAttributes", RuntimeFieldHandle::GetAttributes)
FCFuncElement("GetApproxDeclaringType", RuntimeFieldHandle::GetApproxDeclaringType)
FCFuncElement("GetToken", RuntimeFieldHandle::GetToken)
FCFuncElement("GetStaticFieldForGenericType", RuntimeFieldHandle::GetStaticFieldForGenericType)
FCFuncElement("AcquiresContextFromThis", RuntimeFieldHandle::AcquiresContextFromThis)
FCFuncEnd()
FCFuncStart(gCOMModuleFuncs)
FCFuncElement("GetTypes", COMModule::GetTypes)
FCFuncEnd()
FCFuncStart(gCOMModuleHandleFuncs)
FCFuncElement("GetToken", ModuleHandle::GetToken)
FCFuncElement("GetDynamicMethod", ModuleHandle::GetDynamicMethod)
FCFuncElement("_GetMetadataImport", ModuleHandle::GetMetadataImport)
FCFuncElement("GetMDStreamVersion", ModuleHandle::GetMDStreamVersion)
FCFuncEnd()
FCFuncStart(gCustomAttributeEncodedArgument)
FCFuncElement("ParseAttributeArguments", Attribute::ParseAttributeArguments)
FCFuncEnd()
FCFuncStart(gCOMCustomAttributeFuncs)
FCFuncElement("_ParseAttributeUsageAttribute", COMCustomAttribute::ParseAttributeUsageAttribute)
FCFuncElement("_CreateCaObject", COMCustomAttribute::CreateCaObject)
FCFuncElement("_GetPropertyOrFieldData", COMCustomAttribute::GetPropertyOrFieldData)
FCFuncEnd()
FCFuncStart(gCompatibilitySwitchFuncs)
FCFuncElement("GetValueInternal", CompatibilitySwitch::GetValue)
FCFuncEnd()
FCFuncStart(gRuntimeAssemblyFuncs)
FCFuncElement("FCallIsDynamic", AssemblyNative::IsDynamic)
FCFuncElement("GetReferencedAssemblies", AssemblyNative::GetReferencedAssemblies)
FCFuncElement("GetManifestResourceNames", AssemblyNative::GetManifestResourceNames)
FCFuncElement("GetManifestModule", AssemblyHandle::GetManifestModule)
FCFuncElement("GetToken", AssemblyHandle::GetToken)
FCFuncEnd()
FCFuncStart(gAssemblyLoadContextFuncs)
FCFuncElement("GetLoadedAssemblies", AppDomainNative::GetLoadedAssemblies)
FCFuncElement("IsTracingEnabled", AssemblyNative::IsTracingEnabled)
FCFuncEnd()
FCFuncStart(gAssemblyBuilderFuncs)
FCFuncElement("GetInMemoryAssemblyModule", AssemblyNative::GetInMemoryAssemblyModule)
FCFuncEnd()
FCFuncStart(gDelegateFuncs)
FCFuncElement("BindToMethodName", COMDelegate::BindToMethodName)
FCFuncElement("BindToMethodInfo", COMDelegate::BindToMethodInfo)
FCFuncElement("GetMulticastInvoke", COMDelegate::GetMulticastInvoke)
FCFuncElement("GetInvokeMethod", COMDelegate::GetInvokeMethod)
FCFuncElement("InternalAlloc", COMDelegate::InternalAlloc)
FCFuncElement("InternalAllocLike", COMDelegate::InternalAllocLike)
FCFuncElement("InternalEqualTypes", COMDelegate::InternalEqualTypes)
FCFuncElement("InternalEqualMethodHandles", COMDelegate::InternalEqualMethodHandles)
FCFuncElement("FindMethodHandle", COMDelegate::FindMethodHandle)
FCFuncElement("AdjustTarget", COMDelegate::AdjustTarget)
FCFuncElement("GetCallStub", COMDelegate::GetCallStub)
FCFuncElement("CompareUnmanagedFunctionPtrs", COMDelegate::CompareUnmanagedFunctionPtrs)
// The FCall mechanism knows how to wire multiple different constructor calls into a
// single entrypoint, without the following entry. But we need this entry to satisfy
// frame creation within the body:
FCFuncElement("DelegateConstruct", COMDelegate::DelegateConstruct)
FCFuncEnd()
FCFuncStart(gMathFuncs)
FCFuncElement("Acos", COMDouble::Acos)
FCFuncElement("Acosh", COMDouble::Acosh)
FCFuncElement("Asin", COMDouble::Asin)
FCFuncElement("Asinh", COMDouble::Asinh)
FCFuncElement("Atan", COMDouble::Atan)
FCFuncElement("Atanh", COMDouble::Atanh)
FCFuncElement("Atan2", COMDouble::Atan2)
FCFuncElement("Cbrt", COMDouble::Cbrt)
FCFuncElement("Ceiling", COMDouble::Ceil)
FCFuncElement("Cos", COMDouble::Cos)
FCFuncElement("Cosh", COMDouble::Cosh)
FCFuncElement("Exp", COMDouble::Exp)
FCFuncElement("Floor", COMDouble::Floor)
FCFuncElement("FMod", COMDouble::FMod)
FCFuncElement("FusedMultiplyAdd", COMDouble::FusedMultiplyAdd)
FCFuncElement("Log", COMDouble::Log)
FCFuncElement("Log2", COMDouble::Log2)
FCFuncElement("Log10", COMDouble::Log10)
FCFuncElement("ModF", COMDouble::ModF)
FCFuncElement("Pow", COMDouble::Pow)
FCFuncElement("Sin", COMDouble::Sin)
FCFuncElement("SinCos", COMDouble::SinCos)
FCFuncElement("Sinh", COMDouble::Sinh)
FCFuncElement("Sqrt", COMDouble::Sqrt)
FCFuncElement("Tan", COMDouble::Tan)
FCFuncElement("Tanh", COMDouble::Tanh)
FCFuncEnd()
FCFuncStart(gMathFFuncs)
FCFuncElement("Acos", COMSingle::Acos)
FCFuncElement("Acosh", COMSingle::Acosh)
FCFuncElement("Asin", COMSingle::Asin)
FCFuncElement("Asinh", COMSingle::Asinh)
FCFuncElement("Atan", COMSingle::Atan)
FCFuncElement("Atanh", COMSingle::Atanh)
FCFuncElement("Atan2", COMSingle::Atan2)
FCFuncElement("Cbrt", COMSingle::Cbrt)
FCFuncElement("Ceiling", COMSingle::Ceil)
FCFuncElement("Cos", COMSingle::Cos)
FCFuncElement("Cosh", COMSingle::Cosh)
FCFuncElement("Exp", COMSingle::Exp)
FCFuncElement("Floor", COMSingle::Floor)
FCFuncElement("FMod", COMSingle::FMod)
FCFuncElement("FusedMultiplyAdd", COMSingle::FusedMultiplyAdd)
FCFuncElement("Log", COMSingle::Log)
FCFuncElement("Log2", COMSingle::Log2)
FCFuncElement("Log10", COMSingle::Log10)
FCFuncElement("ModF", COMSingle::ModF)
FCFuncElement("Pow", COMSingle::Pow)
FCFuncElement("Sin", COMSingle::Sin)
FCFuncElement("SinCos", COMSingle::SinCos)
FCFuncElement("Sinh", COMSingle::Sinh)
FCFuncElement("Sqrt", COMSingle::Sqrt)
FCFuncElement("Tan", COMSingle::Tan)
FCFuncElement("Tanh", COMSingle::Tanh)
FCFuncEnd()
FCFuncStart(gThreadFuncs)
FCFuncElement("InternalGetCurrentThread", GetThread)
#undef Sleep
FCFuncElement("SleepInternal", ThreadNative::Sleep)
#define Sleep(a) Dont_Use_Sleep(a)
FCFuncElement("Initialize", ThreadNative::Initialize)
FCFuncElement("SpinWaitInternal", ThreadNative::SpinWait)
FCFuncElement("GetCurrentThreadNative", ThreadNative::GetCurrentThread)
FCFuncElement("get_ManagedThreadId", ThreadNative::GetManagedThreadId)
FCFuncElement("InternalFinalize", ThreadNative::Finalize)
FCFuncElement("get_IsAlive", ThreadNative::IsAlive)
FCFuncElement("IsBackgroundNative", ThreadNative::IsBackground)
FCFuncElement("SetBackgroundNative", ThreadNative::SetBackground)
FCFuncElement("get_IsThreadPoolThread", ThreadNative::IsThreadpoolThread)
FCFuncElement("set_IsThreadPoolThread", ThreadNative::SetIsThreadpoolThread)
FCFuncElement("GetPriorityNative", ThreadNative::GetPriority)
FCFuncElement("SetPriorityNative", ThreadNative::SetPriority)
FCFuncElement("GetThreadStateNative", ThreadNative::GetThreadState)
#ifdef FEATURE_COMINTEROP_APARTMENT_SUPPORT
FCFuncElement("GetApartmentStateNative", ThreadNative::GetApartmentState)
FCFuncElement("SetApartmentStateNative", ThreadNative::SetApartmentState)
#endif // FEATURE_COMINTEROP_APARTMENT_SUPPORT
#ifdef FEATURE_COMINTEROP
FCFuncElement("DisableComObjectEagerCleanup", ThreadNative::DisableComObjectEagerCleanup)
#endif // FEATURE_COMINTEROP
FCFuncElement("Interrupt", ThreadNative::Interrupt)
FCFuncElement("Join", ThreadNative::Join)
FCFuncElement("get_OptimalMaxSpinWaitsPerSpinIteration", ThreadNative::GetOptimalMaxSpinWaitsPerSpinIteration)
FCFuncElement("GetCurrentProcessorNumber", ThreadNative::GetCurrentProcessorNumber)
FCFuncEnd()
FCFuncStart(gThreadPoolFuncs)
FCFuncElement("GetNextConfigUInt32Value", ThreadPoolNative::GetNextConfigUInt32Value)
FCFuncElement("PostQueuedCompletionStatus", ThreadPoolNative::CorPostQueuedCompletionStatus)
FCFuncElement("GetAvailableThreadsNative", ThreadPoolNative::CorGetAvailableThreads)
FCFuncElement("CanSetMinIOCompletionThreads", ThreadPoolNative::CorCanSetMinIOCompletionThreads)
FCFuncElement("CanSetMaxIOCompletionThreads", ThreadPoolNative::CorCanSetMaxIOCompletionThreads)
FCFuncElement("SetMinThreadsNative", ThreadPoolNative::CorSetMinThreads)
FCFuncElement("GetMinThreadsNative", ThreadPoolNative::CorGetMinThreads)
FCFuncElement("GetThreadCount", ThreadPoolNative::GetThreadCount)
FCFuncElement("GetPendingUnmanagedWorkItemCount", ThreadPoolNative::GetPendingUnmanagedWorkItemCount)
FCFuncElement("RegisterWaitForSingleObjectNative", ThreadPoolNative::CorRegisterWaitForSingleObject)
FCFuncElement("BindIOCompletionCallbackNative", ThreadPoolNative::CorBindIoCompletionCallback)
FCFuncElement("SetMaxThreadsNative", ThreadPoolNative::CorSetMaxThreads)
FCFuncElement("GetMaxThreadsNative", ThreadPoolNative::CorGetMaxThreads)
FCFuncElement("NotifyWorkItemCompleteNative", ThreadPoolNative::NotifyRequestComplete)
FCFuncElement("NotifyWorkItemProgressNative", ThreadPoolNative::NotifyRequestProgress)
FCFuncElement("GetEnableWorkerTrackingNative", ThreadPoolNative::GetEnableWorkerTracking)
FCFuncElement("ReportThreadStatusNative", ThreadPoolNative::ReportThreadStatus)
FCFuncEnd()
FCFuncStart(gRegisteredWaitHandleFuncs)
FCFuncElement("UnregisterWaitNative", ThreadPoolNative::CorUnregisterWait)
FCFuncElement("WaitHandleCleanupNative", ThreadPoolNative::CorWaitHandleCleanupNative)
FCFuncEnd()
FCFuncStart(gWaitHandleFuncs)
FCFuncElement("WaitOneCore", WaitHandleNative::CorWaitOneNative)
FCFuncElement("WaitMultipleIgnoringSyncContext", WaitHandleNative::CorWaitMultipleNative)
FCFuncElement("SignalAndWaitNative", WaitHandleNative::CorSignalAndWaitOneNative)
FCFuncEnd()
#ifdef FEATURE_COMINTEROP
FCFuncStart(gVariantFuncs)
FCFuncElement("SetFieldsObject", COMVariant::SetFieldsObject)
FCFuncElement("BoxEnum", COMVariant::BoxEnum)
FCFuncEnd()
#endif // FEATURE_COMINTEROP
#ifdef FEATURE_COMINTEROP
FCFuncStart(gOAVariantFuncs)
FCFuncElement("ChangeTypeEx", COMOAVariant::ChangeTypeEx)
FCFuncEnd()
#endif // FEATURE_COMINTEROP
FCFuncStart(gCastHelpers)
FCFuncElement("IsInstanceOfAny_NoCacheLookup", ::IsInstanceOfAny_NoCacheLookup)
FCFuncElement("ChkCastAny_NoCacheLookup", ::ChkCastAny_NoCacheLookup)
FCFuncElement("Unbox_Helper", ::Unbox_Helper)
FCFuncElement("WriteBarrier", ::WriteBarrier_Helper)
FCFuncEnd()
FCFuncStart(gArrayFuncs)
FCFuncElement("GetCorElementTypeOfElementType", ArrayNative::GetCorElementTypeOfElementType)
FCFuncElement("Initialize", ArrayNative::Initialize)
FCFuncElement("IsSimpleCopy", ArrayNative::IsSimpleCopy)
FCFuncElement("CopySlow", ArrayNative::CopySlow)
FCFuncElement("InternalCreate", ArrayNative::CreateInstance)
FCFuncElement("InternalGetValue", ArrayNative::GetValue)
FCFuncElement("InternalSetValue", ArrayNative::SetValue)
FCFuncEnd()
FCFuncStart(gBufferFuncs)
FCFuncElement("__BulkMoveWithWriteBarrier", Buffer::BulkMoveWithWriteBarrier)
FCFuncEnd()
FCFuncStart(gGCInterfaceFuncs)
FCFuncElement("GetGenerationWR", GCInterface::GetGenerationWR)
FCFuncElement("_RegisterForFullGCNotification", GCInterface::RegisterForFullGCNotification)
FCFuncElement("_CancelFullGCNotification", GCInterface::CancelFullGCNotification)
FCFuncElement("_WaitForFullGCApproach", GCInterface::WaitForFullGCApproach)
FCFuncElement("_WaitForFullGCComplete", GCInterface::WaitForFullGCComplete)
FCFuncElement("_CollectionCount", GCInterface::CollectionCount)
FCFuncElement("GetMemoryInfo", GCInterface::GetMemoryInfo)
FCFuncElement("GetMemoryLoad", GCInterface::GetMemoryLoad)
FCFuncElement("GetSegmentSize", GCInterface::GetSegmentSize)
FCFuncElement("GetLastGCPercentTimeInGC", GCInterface::GetLastGCPercentTimeInGC)
FCFuncElement("GetGenerationSize", GCInterface::GetGenerationSize)
FCFuncElement("GetGeneration", GCInterface::GetGeneration)
FCFuncElement("GetMaxGeneration", GCInterface::GetMaxGeneration)
FCFuncElement("_SuppressFinalize", GCInterface::SuppressFinalize)
FCFuncElement("_ReRegisterForFinalize", GCInterface::ReRegisterForFinalize)
FCFuncElement("GetAllocatedBytesForCurrentThread", GCInterface::GetAllocatedBytesForCurrentThread)
FCFuncElement("GetTotalAllocatedBytes", GCInterface::GetTotalAllocatedBytes)
FCFuncElement("AllocateNewArray", GCInterface::AllocateNewArray)
FCFuncEnd()
FCFuncStart(gGCSettingsFuncs)
FCFuncElement("get_IsServerGC", SystemNative::IsServerGC)
FCFuncElement("GetGCLatencyMode", GCInterface::GetGcLatencyMode)
FCFuncElement("GetLOHCompactionMode", GCInterface::GetLOHCompactionMode)
FCFuncElement("SetGCLatencyMode", GCInterface::SetGcLatencyMode)
FCFuncElement("SetLOHCompactionMode", GCInterface::SetLOHCompactionMode)
FCFuncEnd()
FCFuncStart(gInteropMarshalFuncs)
FCFuncElement("GetLastPInvokeError", MarshalNative::GetLastPInvokeError)
FCFuncElement("SetLastPInvokeError", MarshalNative::SetLastPInvokeError)
FCFuncElement("SizeOfHelper", MarshalNative::SizeOfClass)
FCFuncElement("StructureToPtr", MarshalNative::StructureToPtr)
FCFuncElement("PtrToStructureHelper", MarshalNative::PtrToStructureHelper)
FCFuncElement("DestroyStructure", MarshalNative::DestroyStructure)
FCFuncElement("IsPinnable", MarshalNative::IsPinnable)
FCFuncElement("GetExceptionCode", ExceptionNative::GetExceptionCode)
FCFuncElement("GetExceptionPointers", ExceptionNative::GetExceptionPointers)
FCFuncElement("OffsetOfHelper", MarshalNative::OffsetOfHelper)
FCFuncElement("GetExceptionForHRInternal", MarshalNative::GetExceptionForHR)
FCFuncElement("GetDelegateForFunctionPointerInternal", MarshalNative::GetDelegateForFunctionPointerInternal)
FCFuncElement("GetFunctionPointerForDelegateInternal", MarshalNative::GetFunctionPointerForDelegateInternal)
#ifdef FEATURE_COMINTEROP
FCFuncElement("GetHRForException", MarshalNative::GetHRForException)
FCFuncElement("GetObjectForIUnknownNative", MarshalNative::GetObjectForIUnknownNative)
FCFuncElement("GetUniqueObjectForIUnknownNative", MarshalNative::GetUniqueObjectForIUnknownNative)
FCFuncElement("GetNativeVariantForObjectNative", MarshalNative::GetNativeVariantForObjectNative)
FCFuncElement("GetObjectForNativeVariantNative", MarshalNative::GetObjectForNativeVariantNative)
FCFuncElement("InternalFinalReleaseComObject", MarshalNative::FinalReleaseComObject)
FCFuncElement("IsTypeVisibleFromCom", MarshalNative::IsTypeVisibleFromCom)
FCFuncElement("CreateAggregatedObjectNative", MarshalNative::CreateAggregatedObjectNative)
FCFuncElement("AreComObjectsAvailableForCleanup", MarshalNative::AreComObjectsAvailableForCleanup)
FCFuncElement("InternalCreateWrapperOfType", MarshalNative::InternalCreateWrapperOfType)
FCFuncElement("GetObjectsForNativeVariantsNative", MarshalNative::GetObjectsForNativeVariantsNative)
FCFuncElement("GetStartComSlot", MarshalNative::GetStartComSlot)
FCFuncElement("GetEndComSlot", MarshalNative::GetEndComSlot)
FCFuncElement("GetIUnknownForObjectNative", MarshalNative::GetIUnknownForObjectNative)
FCFuncElement("GetIDispatchForObjectNative", MarshalNative::GetIDispatchForObjectNative)
FCFuncElement("GetComInterfaceForObjectNative", MarshalNative::GetComInterfaceForObjectNative)
FCFuncElement("InternalReleaseComObject", MarshalNative::ReleaseComObject)
FCFuncElement("GetTypedObjectForIUnknown", MarshalNative::GetTypedObjectForIUnknown)
FCFuncElement("ChangeWrapperHandleStrength", MarshalNative::ChangeWrapperHandleStrength)
FCFuncElement("CleanupUnusedObjectsInCurrentContext", MarshalNative::CleanupUnusedObjectsInCurrentContext)
#endif // FEATURE_COMINTEROP
FCFuncEnd()
FCFuncStart(gMissingMemberExceptionFuncs)
FCFuncElement("FormatSignature", MissingMemberException_FormatSignature)
FCFuncEnd()
FCFuncStart(gInterlockedFuncs)
FCFuncElementSig("Exchange", &gsig_SM_RefInt_Int_RetInt, COMInterlocked::Exchange)
FCFuncElementSig("Exchange", &gsig_SM_RefLong_Long_RetLong, COMInterlocked::Exchange64)
FCFuncElementSig("Exchange", &gsig_SM_RefDbl_Dbl_RetDbl, COMInterlocked::ExchangeDouble)
FCFuncElementSig("Exchange", &gsig_SM_RefFlt_Flt_RetFlt, COMInterlocked::ExchangeFloat)
FCFuncElementSig("Exchange", &gsig_SM_RefObj_Obj_RetObj, COMInterlocked::ExchangeObject)
FCFuncElementSig("CompareExchange", &gsig_SM_RefInt_Int_Int_RetInt, COMInterlocked::CompareExchange)
FCFuncElementSig("CompareExchange", &gsig_SM_RefLong_Long_Long_RetLong, COMInterlocked::CompareExchange64)
FCFuncElementSig("CompareExchange", &gsig_SM_RefDbl_Dbl_Dbl_RetDbl, COMInterlocked::CompareExchangeDouble)
FCFuncElementSig("CompareExchange", &gsig_SM_RefFlt_Flt_Flt_RetFlt, COMInterlocked::CompareExchangeFloat)
FCFuncElementSig("CompareExchange", &gsig_SM_RefObj_Obj_Obj_RetObj, COMInterlocked::CompareExchangeObject)
FCFuncElementSig("ExchangeAdd", &gsig_SM_RefInt_Int_RetInt, COMInterlocked::ExchangeAdd32)
FCFuncElementSig("ExchangeAdd", &gsig_SM_RefLong_Long_RetLong, COMInterlocked::ExchangeAdd64)
FCFuncElement("MemoryBarrier", COMInterlocked::FCMemoryBarrier)
FCFuncElement("ReadMemoryBarrier", COMInterlocked::FCMemoryBarrierLoad)
FCFuncEnd()
FCFuncStart(gJitInfoFuncs)
FCFuncElement("GetCompiledILBytes", GetCompiledILBytes)
FCFuncElement("GetCompiledMethodCount", GetCompiledMethodCount)
FCFuncElement("GetCompilationTimeInTicks", GetCompilationTimeInTicks)
FCFuncEnd()
FCFuncStart(gVarArgFuncs)
FCFuncElementSig(COR_CTOR_METHOD_NAME, &gsig_IM_IntPtr_PtrVoid_RetVoid, VarArgsNative::Init2)
FCFuncElementSig(COR_CTOR_METHOD_NAME, &gsig_IM_IntPtr_RetVoid, VarArgsNative::Init)
FCFuncElement("GetRemainingCount", VarArgsNative::GetRemainingCount)
FCFuncElement("_GetNextArgType", VarArgsNative::GetNextArgType)
FCFuncElement("FCallGetNextArg", VarArgsNative::DoGetNextArg)
FCFuncElement("InternalGetNextArg", VarArgsNative::GetNextArg2)
FCFuncEnd()
FCFuncStart(gMonitorFuncs)
FCFuncElement("Enter", JIT_MonEnter)
FCFuncElement("ReliableEnter", JIT_MonReliableEnter)
FCFuncElement("ReliableEnterTimeout", JIT_MonTryEnter)
FCFuncElement("Exit", JIT_MonExit)
FCFuncElement("ObjWait", ObjectNative::WaitTimeout)
FCFuncElement("ObjPulse", ObjectNative::Pulse)
FCFuncElement("ObjPulseAll", ObjectNative::PulseAll)
FCFuncElement("IsEnteredNative", ObjectNative::IsLockHeld)
FCFuncEnd()
FCFuncStart(gOverlappedFuncs)
FCFuncElement("AllocateNativeOverlapped", AllocateNativeOverlapped)
FCFuncElement("FreeNativeOverlapped", FreeNativeOverlapped)
FCFuncElement("CheckVMForIOPacket", CheckVMForIOPacket)
FCFuncElement("GetOverlappedFromNative", GetOverlappedFromNative)
FCFuncEnd()
FCFuncStart(gRuntimeHelpers)
FCFuncElement("GetObjectValue", ObjectNative::GetObjectValue)
FCFuncElement("InitializeArray", ArrayNative::InitializeArray)
FCFuncElement("GetSpanDataFrom", ArrayNative::GetSpanDataFrom)
FCFuncElement("PrepareDelegate", ReflectionInvocation::PrepareDelegate)
FCFuncElement("GetHashCode", ObjectNative::GetHashCode)
FCFuncElement("Equals", ObjectNative::Equals)
FCFuncElement("AllocateUninitializedClone", ObjectNative::AllocateUninitializedClone)
FCFuncElement("EnsureSufficientExecutionStack", ReflectionInvocation::EnsureSufficientExecutionStack)
FCFuncElement("TryEnsureSufficientExecutionStack", ReflectionInvocation::TryEnsureSufficientExecutionStack)
FCFuncElement("AllocTailCallArgBuffer", TailCallHelp::AllocTailCallArgBuffer)
FCFuncElement("GetTailCallInfo", TailCallHelp::GetTailCallInfo)
FCFuncEnd()
FCFuncStart(gMngdFixedArrayMarshalerFuncs)
FCFuncElement("CreateMarshaler", MngdFixedArrayMarshaler::CreateMarshaler)
FCFuncElement("ConvertSpaceToNative", MngdFixedArrayMarshaler::ConvertSpaceToNative)
FCFuncElement("ConvertContentsToNative", MngdFixedArrayMarshaler::ConvertContentsToNative)
FCFuncElement("ConvertSpaceToManaged", MngdFixedArrayMarshaler::ConvertSpaceToManaged)
FCFuncElement("ConvertContentsToManaged", MngdFixedArrayMarshaler::ConvertContentsToManaged)
FCFuncElement("ClearNativeContents", MngdFixedArrayMarshaler::ClearNativeContents)
FCFuncEnd()
FCFuncStart(gMngdNativeArrayMarshalerFuncs)
FCFuncElement("CreateMarshaler", MngdNativeArrayMarshaler::CreateMarshaler)
FCFuncElement("ConvertSpaceToNative", MngdNativeArrayMarshaler::ConvertSpaceToNative)
FCFuncElement("ConvertContentsToNative", MngdNativeArrayMarshaler::ConvertContentsToNative)
FCFuncElement("ConvertSpaceToManaged", MngdNativeArrayMarshaler::ConvertSpaceToManaged)
FCFuncElement("ConvertContentsToManaged", MngdNativeArrayMarshaler::ConvertContentsToManaged)
FCFuncElement("ClearNative", MngdNativeArrayMarshaler::ClearNative)
FCFuncElement("ClearNativeContents", MngdNativeArrayMarshaler::ClearNativeContents)
FCFuncEnd()
#ifdef FEATURE_COMINTEROP
FCFuncStart(gObjectMarshalerFuncs)
FCFuncElement("ConvertToNative", StubHelpers::ObjectMarshaler__ConvertToNative)
FCFuncElement("ConvertToManaged", StubHelpers::ObjectMarshaler__ConvertToManaged)
FCFuncElement("ClearNative", StubHelpers::ObjectMarshaler__ClearNative)
FCFuncEnd()
FCFuncStart(gInterfaceMarshalerFuncs)
FCFuncElement("ConvertToNative", StubHelpers::InterfaceMarshaler__ConvertToNative)
FCFuncElement("ConvertToManaged", StubHelpers::InterfaceMarshaler__ConvertToManaged)
FCFuncEnd()
FCFuncStart(gMngdSafeArrayMarshalerFuncs)
FCFuncElement("CreateMarshaler", MngdSafeArrayMarshaler::CreateMarshaler)
FCFuncElement("ConvertSpaceToNative", MngdSafeArrayMarshaler::ConvertSpaceToNative)
FCFuncElement("ConvertContentsToNative", MngdSafeArrayMarshaler::ConvertContentsToNative)
FCFuncElement("ConvertSpaceToManaged", MngdSafeArrayMarshaler::ConvertSpaceToManaged)
FCFuncElement("ConvertContentsToManaged", MngdSafeArrayMarshaler::ConvertContentsToManaged)
FCFuncElement("ClearNative", MngdSafeArrayMarshaler::ClearNative)
FCFuncEnd()
#endif // FEATURE_COMINTEROP
FCFuncStart(gMngdRefCustomMarshalerFuncs)
FCFuncElement("CreateMarshaler", MngdRefCustomMarshaler::CreateMarshaler)
FCFuncElement("ConvertContentsToNative", MngdRefCustomMarshaler::ConvertContentsToNative)
FCFuncElement("ConvertContentsToManaged", MngdRefCustomMarshaler::ConvertContentsToManaged)
FCFuncElement("ClearNative", MngdRefCustomMarshaler::ClearNative)
FCFuncElement("ClearManaged", MngdRefCustomMarshaler::ClearManaged)
FCFuncEnd()
FCFuncStart(gStubHelperFuncs)
FCFuncElement("GetNDirectTarget", StubHelpers::GetNDirectTarget)
FCFuncElement("GetDelegateTarget", StubHelpers::GetDelegateTarget)
FCFuncElement("SetLastError", StubHelpers::SetLastError)
FCFuncElement("ClearLastError", StubHelpers::ClearLastError)
FCFuncElement("ThrowInteropParamException", StubHelpers::ThrowInteropParamException)
FCFuncElement("InternalGetHRExceptionObject", StubHelpers::GetHRExceptionObject)
#ifdef FEATURE_COMINTEROP
FCFuncElement("InternalGetCOMHRExceptionObject", StubHelpers::GetCOMHRExceptionObject)
FCFuncElement("GetCOMIPFromRCW", StubHelpers::GetCOMIPFromRCW)
#endif // FEATURE_COMINTEROP
#ifdef PROFILING_SUPPORTED
FCFuncElement("ProfilerBeginTransitionCallback", StubHelpers::ProfilerBeginTransitionCallback)
FCFuncElement("ProfilerEndTransitionCallback", StubHelpers::ProfilerEndTransitionCallback)
#endif
FCFuncElement("CreateCustomMarshalerHelper", StubHelpers::CreateCustomMarshalerHelper)
FCFuncElement("FmtClassUpdateNativeInternal", StubHelpers::FmtClassUpdateNativeInternal)
FCFuncElement("FmtClassUpdateCLRInternal", StubHelpers::FmtClassUpdateCLRInternal)
FCFuncElement("LayoutDestroyNativeInternal", StubHelpers::LayoutDestroyNativeInternal)
FCFuncElement("AllocateInternal", StubHelpers::AllocateInternal)
FCFuncElement("MarshalToUnmanagedVaListInternal", StubHelpers::MarshalToUnmanagedVaListInternal)
FCFuncElement("MarshalToManagedVaListInternal", StubHelpers::MarshalToManagedVaListInternal)
FCFuncElement("CalcVaListSize", StubHelpers::CalcVaListSize)
FCFuncElement("ValidateObject", StubHelpers::ValidateObject)
FCFuncElement("ValidateByref", StubHelpers::ValidateByref)
FCFuncElement("LogPinnedArgument", StubHelpers::LogPinnedArgument)
FCFuncElement("GetStubContext", StubHelpers::GetStubContext)
#ifdef FEATURE_ARRAYSTUB_AS_IL
FCFuncElement("ArrayTypeCheck", StubHelpers::ArrayTypeCheck)
#endif //FEATURE_ARRAYSTUB_AS_IL
#ifdef FEATURE_MULTICASTSTUB_AS_IL
FCFuncElement("MulticastDebuggerTraceHelper", StubHelpers::MulticastDebuggerTraceHelper)
#endif //FEATURE_MULTICASTSTUB_AS_IL
FCFuncElement("NextCallReturnAddress", StubHelpers::NextCallReturnAddress)
FCFuncEnd()
FCFuncStart(gGCHandleFuncs)
FCFuncElement("InternalAlloc", MarshalNative::GCHandleInternalAlloc)
FCFuncElement("InternalFree", MarshalNative::GCHandleInternalFree)
FCFuncElement("InternalGet", MarshalNative::GCHandleInternalGet)
FCFuncElement("InternalSet", MarshalNative::GCHandleInternalSet)
FCFuncElement("InternalCompareExchange", MarshalNative::GCHandleInternalCompareExchange)
FCFuncEnd()
FCFuncStart(gStreamFuncs)
FCFuncElement("HasOverriddenBeginEndRead", StreamNative::HasOverriddenBeginEndRead)
FCFuncElement("HasOverriddenBeginEndWrite", StreamNative::HasOverriddenBeginEndWrite)
FCFuncEnd()
FCFuncStart(gWeakReferenceFuncs)
FCFuncElement("Create", WeakReferenceNative::Create)
FCFuncElement("Finalize", WeakReferenceNative::Finalize)
FCFuncElement("get_Target", WeakReferenceNative::GetTarget)
FCFuncElement("set_Target", WeakReferenceNative::SetTarget)
FCFuncElement("get_IsAlive", WeakReferenceNative::IsAlive)
FCFuncElement("IsTrackResurrection", WeakReferenceNative::IsTrackResurrection)
FCFuncEnd()
FCFuncStart(gWeakReferenceOfTFuncs)
FCFuncElement("Create", WeakReferenceOfTNative::Create)
FCFuncElement("Finalize", WeakReferenceOfTNative::Finalize)
FCFuncElement("get_Target", WeakReferenceOfTNative::GetTarget)
FCFuncElement("set_Target", WeakReferenceOfTNative::SetTarget)
FCFuncElement("IsTrackResurrection", WeakReferenceOfTNative::IsTrackResurrection)
FCFuncEnd()
#ifdef FEATURE_COMINTEROP
//
// ECall helpers for the standard managed interfaces.
//
#define MNGSTDITF_BEGIN_INTERFACE(FriendlyName, strMngItfName, strUCOMMngItfName, strCustomMarshalerName, strCustomMarshalerCookie, strManagedViewName, NativeItfIID, bCanCastOnNativeItfQI) \
FCFuncStart(g##FriendlyName##Funcs)
#define MNGSTDITF_DEFINE_METH_IMPL(FriendlyName, FCallMethName, MethName, MethSig, FcallDecl) \
FCUnreferenced FCFuncElementSig(#MethName, MethSig, FriendlyName::FCallMethName)
#define MNGSTDITF_END_INTERFACE(FriendlyName) \
FCFuncEnd()
#include "mngstditflist.h"
#undef MNGSTDITF_BEGIN_INTERFACE
#undef MNGSTDITF_DEFINE_METH_IMPL
#undef MNGSTDITF_END_INTERFACE
#endif // FEATURE_COMINTEROP
//
//
// Class definitions
//
//
// Note these have to remain sorted by name:namespace pair (Assert will whack you if you don't)
// The sorting is case-sensitive
FCClassElement("ArgIterator", "System", gVarArgFuncs)
FCClassElement("Array", "System", gArrayFuncs)
FCClassElement("AssemblyBuilder", "System.Reflection.Emit", gAssemblyBuilderFuncs)
FCClassElement("AssemblyLoadContext", "System.Runtime.Loader", gAssemblyLoadContextFuncs)
FCClassElement("Buffer", "System", gBufferFuncs)
FCClassElement("CastHelpers", "System.Runtime.CompilerServices", gCastHelpers)
FCClassElement("CompatibilitySwitch", "System.Runtime.Versioning", gCompatibilitySwitchFuncs)
FCClassElement("CustomAttribute", "System.Reflection", gCOMCustomAttributeFuncs)
FCClassElement("CustomAttributeEncodedArgument", "System.Reflection", gCustomAttributeEncodedArgument)
FCClassElement("Debugger", "System.Diagnostics", gDiagnosticsDebugger)
FCClassElement("Delegate", "System", gDelegateFuncs)
FCClassElement("DependentHandle", "System.Runtime", gDependentHandleFuncs)
FCClassElement("Enum", "System", gEnumFuncs)
FCClassElement("Environment", "System", gEnvironmentFuncs)
FCClassElement("Exception", "System", gExceptionFuncs)
FCClassElement("GC", "System", gGCInterfaceFuncs)
FCClassElement("GCHandle", "System.Runtime.InteropServices", gGCHandleFuncs)
FCClassElement("GCSettings", "System.Runtime", gGCSettingsFuncs)
#ifdef FEATURE_COMINTEROP
FCClassElement("IEnumerable", "System.Collections", gStdMngIEnumerableFuncs)
FCClassElement("IEnumerator", "System.Collections", gStdMngIEnumeratorFuncs)
FCClassElement("IReflect", "System.Reflection", gStdMngIReflectFuncs)
FCClassElement("InterfaceMarshaler", "System.StubHelpers", gInterfaceMarshalerFuncs)
#endif
FCClassElement("Interlocked", "System.Threading", gInterlockedFuncs)
FCClassElement("JitInfo", "System.Runtime", gJitInfoFuncs)
FCClassElement("Marshal", "System.Runtime.InteropServices", gInteropMarshalFuncs)
FCClassElement("Math", "System", gMathFuncs)
FCClassElement("MathF", "System", gMathFFuncs)
FCClassElement("MetadataImport", "System.Reflection", gMetaDataImport)
FCClassElement("MissingMemberException", "System", gMissingMemberExceptionFuncs)
FCClassElement("MngdFixedArrayMarshaler", "System.StubHelpers", gMngdFixedArrayMarshalerFuncs)
FCClassElement("MngdNativeArrayMarshaler", "System.StubHelpers", gMngdNativeArrayMarshalerFuncs)
FCClassElement("MngdRefCustomMarshaler", "System.StubHelpers", gMngdRefCustomMarshalerFuncs)
#ifdef FEATURE_COMINTEROP
FCClassElement("MngdSafeArrayMarshaler", "System.StubHelpers", gMngdSafeArrayMarshalerFuncs)
#endif // FEATURE_COMINTEROP
FCClassElement("ModuleHandle", "System", gCOMModuleHandleFuncs)
FCClassElement("Monitor", "System.Threading", gMonitorFuncs)
#ifdef FEATURE_COMINTEROP
FCClassElement("OAVariantLib", "Microsoft.Win32", gOAVariantFuncs)
#endif
FCClassElement("Object", "System", gObjectFuncs)
#ifdef FEATURE_COMINTEROP
FCClassElement("ObjectMarshaler", "System.StubHelpers", gObjectMarshalerFuncs)
#endif
FCClassElement("OverlappedData", "System.Threading", gOverlappedFuncs)
FCClassElement("RegisteredWaitHandle", "System.Threading", gRegisteredWaitHandleFuncs)
FCClassElement("RuntimeAssembly", "System.Reflection", gRuntimeAssemblyFuncs)
FCClassElement("RuntimeFieldHandle", "System", gCOMFieldHandleNewFuncs)
FCClassElement("RuntimeHelpers", "System.Runtime.CompilerServices", gRuntimeHelpers)
FCClassElement("RuntimeMethodHandle", "System", gRuntimeMethodHandle)
FCClassElement("RuntimeModule", "System.Reflection", gCOMModuleFuncs)
FCClassElement("RuntimeType", "System", gSystem_RuntimeType)
FCClassElement("RuntimeTypeHandle", "System", gCOMTypeHandleFuncs)
FCClassElement("Signature", "System", gSignatureNative)
FCClassElement("StackTrace", "System.Diagnostics", gDiagnosticsStackTrace)
FCClassElement("Stream", "System.IO", gStreamFuncs)
FCClassElement("String", "System", gStringFuncs)
FCClassElement("StubHelpers", "System.StubHelpers", gStubHelperFuncs)
FCClassElement("Thread", "System.Threading", gThreadFuncs)
FCClassElement("ThreadPool", "System.Threading", gThreadPoolFuncs)
FCClassElement("Type", "System", gSystem_Type)
FCClassElement("TypedReference", "System", gTypedReferenceFuncs)
FCClassElement("ValueType", "System", gValueTypeFuncs)
#ifdef FEATURE_COMINTEROP
FCClassElement("Variant", "System", gVariantFuncs)
#endif
FCClassElement("WaitHandle", "System.Threading", gWaitHandleFuncs)
FCClassElement("WeakReference", "System", gWeakReferenceFuncs)
FCClassElement("WeakReference`1", "System", gWeakReferenceOfTFuncs)
#undef FCFuncElement
#undef FCFuncElementSig
#undef FCDynamic
#undef FCDynamicSig
#undef FCUnreferenced
#undef FCFuncStart
#undef FCFuncEnd
#undef FCClassElement
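The table above is the FCall registration list: each FCFuncElement row binds a managed method name to a native entry point, and each FCClassElement row maps a managed type to its function table. For orientation, the managed side of such a binding is an extern method marked InternalCall. The sketch below is illustrative only, simplified from the real System.Math declaration, and assumes the FCFuncElement("Sqrt", COMDouble::Sqrt) entry registered under FCClassElement("Math", "System", gMathFuncs) above.

using System.Runtime.CompilerServices;

namespace System
{
    public static partial class Math
    {
        // Declaration only: the body is supplied by the runtime, which resolves
        // the call through the FCall table above to COMDouble::Sqrt.
        [MethodImpl(MethodImplOptions.InternalCall)]
        public static extern double Sqrt(double d);
    }
}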
./src/native/external/brotli/common/transform.h
/* transforms is a part of ABI, but not API.
It means that there are some functions that are supposed to be in "common"
library, but header itself is not placed into include/brotli. This way,
aforementioned functions will be available only to brotli internals.
*/
#ifndef BROTLI_COMMON_TRANSFORM_H_
#define BROTLI_COMMON_TRANSFORM_H_
#include <brotli/port.h>
#include <brotli/types.h>
#if defined(__cplusplus) || defined(c_plusplus)
extern "C" {
#endif
enum BrotliWordTransformType {
BROTLI_TRANSFORM_IDENTITY = 0,
BROTLI_TRANSFORM_OMIT_LAST_1 = 1,
BROTLI_TRANSFORM_OMIT_LAST_2 = 2,
BROTLI_TRANSFORM_OMIT_LAST_3 = 3,
BROTLI_TRANSFORM_OMIT_LAST_4 = 4,
BROTLI_TRANSFORM_OMIT_LAST_5 = 5,
BROTLI_TRANSFORM_OMIT_LAST_6 = 6,
BROTLI_TRANSFORM_OMIT_LAST_7 = 7,
BROTLI_TRANSFORM_OMIT_LAST_8 = 8,
BROTLI_TRANSFORM_OMIT_LAST_9 = 9,
BROTLI_TRANSFORM_UPPERCASE_FIRST = 10,
BROTLI_TRANSFORM_UPPERCASE_ALL = 11,
BROTLI_TRANSFORM_OMIT_FIRST_1 = 12,
BROTLI_TRANSFORM_OMIT_FIRST_2 = 13,
BROTLI_TRANSFORM_OMIT_FIRST_3 = 14,
BROTLI_TRANSFORM_OMIT_FIRST_4 = 15,
BROTLI_TRANSFORM_OMIT_FIRST_5 = 16,
BROTLI_TRANSFORM_OMIT_FIRST_6 = 17,
BROTLI_TRANSFORM_OMIT_FIRST_7 = 18,
BROTLI_TRANSFORM_OMIT_FIRST_8 = 19,
BROTLI_TRANSFORM_OMIT_FIRST_9 = 20,
BROTLI_TRANSFORM_SHIFT_FIRST = 21,
BROTLI_TRANSFORM_SHIFT_ALL = 22,
BROTLI_NUM_TRANSFORM_TYPES /* Counts transforms, not a transform itself. */
};
#define BROTLI_TRANSFORMS_MAX_CUT_OFF BROTLI_TRANSFORM_OMIT_LAST_9
typedef struct BrotliTransforms {
uint16_t prefix_suffix_size;
/* Last character must be null, so prefix_suffix_size must be at least 1. */
const uint8_t* prefix_suffix;
const uint16_t* prefix_suffix_map;
uint32_t num_transforms;
/* Each entry is a [prefix_id, transform, suffix_id] triplet. */
const uint8_t* transforms;
/* Shift for BROTLI_TRANSFORM_SHIFT_FIRST and BROTLI_TRANSFORM_SHIFT_ALL,
must be NULL if and only if no such transforms are present. */
const uint8_t* params;
/* Indices of transforms like ["", BROTLI_TRANSFORM_OMIT_LAST_#, ""].
0-th element corresponds to ["", BROTLI_TRANSFORM_IDENTITY, ""].
-1, if cut-off transform does not exist. */
int16_t cutOffTransforms[BROTLI_TRANSFORMS_MAX_CUT_OFF + 1];
} BrotliTransforms;
/* T is BrotliTransforms*; result is uint8_t. */
#define BROTLI_TRANSFORM_PREFIX_ID(T, I) ((T)->transforms[((I) * 3) + 0])
#define BROTLI_TRANSFORM_TYPE(T, I) ((T)->transforms[((I) * 3) + 1])
#define BROTLI_TRANSFORM_SUFFIX_ID(T, I) ((T)->transforms[((I) * 3) + 2])
/* T is BrotliTransforms*; result is const uint8_t*. */
#define BROTLI_TRANSFORM_PREFIX(T, I) (&(T)->prefix_suffix[ \
(T)->prefix_suffix_map[BROTLI_TRANSFORM_PREFIX_ID(T, I)]])
#define BROTLI_TRANSFORM_SUFFIX(T, I) (&(T)->prefix_suffix[ \
(T)->prefix_suffix_map[BROTLI_TRANSFORM_SUFFIX_ID(T, I)]])
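/* Worked example (illustrative note, not part of the original header): with the
   triplet layout described above, entry I = 2 occupies transforms[6..8], so
   BROTLI_TRANSFORM_PREFIX_ID(T, 2) reads transforms[6],
   BROTLI_TRANSFORM_TYPE(T, 2) reads transforms[7],
   BROTLI_TRANSFORM_SUFFIX_ID(T, 2) reads transforms[8],
   and BROTLI_TRANSFORM_PREFIX(T, 2) points at
   prefix_suffix[prefix_suffix_map[transforms[6]]]. */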
BROTLI_COMMON_API const BrotliTransforms* BrotliGetTransforms(void);
BROTLI_COMMON_API int BrotliTransformDictionaryWord(
uint8_t* dst, const uint8_t* word, int len,
const BrotliTransforms* transforms, int transform_idx);
#if defined(__cplusplus) || defined(c_plusplus)
} /* extern "C" */
#endif
#endif /* BROTLI_COMMON_TRANSFORM_H_ */
./src/libraries/System.IO.FileSystem.Watcher/tests/Args.ErrorEventArgs.cs
// Licensed to the .NET Foundation under one or more agreements.
// The .NET Foundation licenses this file to you under the MIT license.
using Xunit;
namespace System.IO.Tests
{
public class ErrorEventArgsTests
{
[Fact]
public void ErrorEventArgs_ctor()
{
Exception exception = new Exception();
ErrorEventArgs args = new ErrorEventArgs(exception);
Assert.Equal(exception, args.GetException());
// Make sure method is consistent.
Assert.Equal(exception, args.GetException());
}
[Fact]
public void ErrorEventArgs_ctor_Null()
{
ErrorEventArgs args = new ErrorEventArgs(null);
Assert.Null(args.GetException());
// Make sure method is consistent.
Assert.Null(args.GetException());
}
}
}
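ErrorEventArgs is the payload of FileSystemWatcher's Error event, which is the scenario the tests above exercise. A minimal usage sketch follows; it is illustrative only, and the watched directory path is a placeholder.

using System;
using System.IO;

class ErrorEventArgsUsageSketch
{
    static void Main()
    {
        using var watcher = new FileSystemWatcher(@"C:\temp");   // placeholder path
        watcher.Error += (object sender, ErrorEventArgs e) =>
        {
            // GetException() returns the exception the watcher hit (for example an
            // InternalBufferOverflowException); it can be null, as the tests verify.
            Console.WriteLine(e.GetException()?.Message ?? "no exception");
        };
        watcher.EnableRaisingEvents = true;
        Console.ReadLine();
    }
}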
./src/libraries/System.Collections.Concurrent/tests/IntRangePartitionerTests.cs
// Licensed to the .NET Foundation under one or more agreements.
// The .NET Foundation licenses this file to you under the MIT license.
// =+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+
//
// IntRangePartitionerTests.cs - Tests for range partitioner for integer range.
//
// PLEASE NOTE !! - For tests that need to iterate the elements inside the partitions more
// than once, we need to call GetPartitions a second time. Iterating a second time
// over the first IEnumerable<tuple> / IList<IEnumerator<tuple>> will yield no elements
//
// PLEASE NOTE!! - we use lazy evaluation wherever possible to allow for more than Int32.MaxValue
// elements. ToArray / ToList will result in an OOM
//
// Taken from:
// \qa\clr\testsrc\pfx\Functional\Common\Partitioner\YetiTests\RangePartitioner\IntRangePartitionerTests.cs
// =-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-
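// Illustrative note (added commentary, not part of the original file): the "iterate once"
// caveat above looks like this in practice (sketch only):
//
//     var partitioner = Partitioner.Create(0, 100);
//     var partitions = partitioner.GetPartitions(2);
//     // ... drain partitions[0] and partitions[1] ...
//     partitions = partitioner.GetPartitions(2);   // ask again before a second pass
//
// Each IEnumerator handed out is forward-only, so a fresh GetPartitions call is needed
// for every full pass over the range.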
using System;
using System.Collections;
using System.Collections.Concurrent;
using System.Collections.Generic;
using Xunit;
namespace System.Collections.Concurrent.Tests
{
public class IntRangePartitionerTests
{
/// <summary>
/// Ensure that the partitioner returned has properties set correctly
/// </summary>
[Fact]
public static void CheckKeyProperties()
{
var partitioner = Partitioner.Create(0, 1000);
Assert.True(partitioner.KeysOrderedInEachPartition, "Expected KeysOrderedInEachPartition to be set to true");
Assert.False(partitioner.KeysOrderedAcrossPartitions, "KeysOrderedAcrossPartitions to be set to false");
Assert.True(partitioner.KeysNormalized, "Expected KeysNormalized to be set to true");
partitioner = Partitioner.Create(0, 1000, 90);
Assert.True(partitioner.KeysOrderedInEachPartition, "Expected KeysOrderedInEachPartition to be set to true");
Assert.False(partitioner.KeysOrderedAcrossPartitions, "KeysOrderedAcrossPartitions to be set to false");
Assert.True(partitioner.KeysNormalized, "Expected KeysNormalized to be set to true");
}
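// Added commentary: the asserts above capture the contract the rest of this file relies on -
// keys are ordered and normalized within each partition, but no ordering is guaranteed across
// partitions, both for the default partitioner and for one created with an explicit range size.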
/// <summary>
/// GetPartitions returns an IList<IEnumerator<Tuple<int, int>>
/// We unroll the tuples and flatten them to a single sequence
/// The single sequence is compared to the original range for verification
/// </summary>
[Theory]
[InlineData(0, 1, 1)]
[InlineData(1, 1999, 3)]
[InlineData(2147473647, 9999, 4)]
[InlineData(-1999, 5000, 63)]
[InlineData(-2147483648, 5000, 63)]
public static void CheckGetPartitions(int from, int count, int dop)
{
int to = from + count;
var partitioner = Partitioner.Create(from, to);
//var elements = dopPartitions.SelectMany(enumerator => enumerator.UnRoll());
IList<int> elements = new List<int>();
foreach (var partition in partitioner.GetPartitions(dop))
{
foreach (var item in partition.UnRoll())
elements.Add(item);
}
Assert.True(elements.CompareSequences<int>(RangePartitionerHelpers.IntEnumerable(from, to)), "GetPartitions element mismatch");
}
/// <summary>
/// CheckGetDynamicPartitions returns an IEnumerable<Tuple<int, int>>
/// We unroll the tuples and flatten them to a single sequence
/// The single sequence is compared to the original range for verification
/// </summary>
/// <param name="from"></param>
/// <param name="count"></param>
[Theory]
[InlineData(0, 1)]
[InlineData(1, 1999)]
[InlineData(2147473647, 9999)]
[InlineData(-1999, 5000)]
[InlineData(-2147483648, 5000)]
public static void CheckGetDynamicPartitions(int from, int count)
{
int to = from + count;
var partitioner = Partitioner.Create(from, to);
//var elements = partitioner.GetDynamicPartitions().SelectMany(tuple => tuple.UnRoll());
IList<int> elements = new List<int>();
foreach (var partition in partitioner.GetDynamicPartitions())
{
foreach (var item in partition.UnRoll())
elements.Add(item);
}
Assert.True(elements.CompareSequences<int>(RangePartitionerHelpers.IntEnumerable(from, to)), "GetDynamicPartitions Element mismatch");
}
/// <summary>
/// GetOrderablePartitions returns an IList<IEnumerator<KeyValuePair<long, Tuple<int, int>>>
/// We unroll the tuples and flatten them to a single sequence
/// The single sequence is compared to the original range for verification
/// Also the indices are extracted to ensure that they are ordered & normalized
/// </summary>
[Theory]
[InlineData(0, 1, 1)]
[InlineData(1, 1999, 3)]
[InlineData(2147473647, 9999, 4)]
[InlineData(-1999, 5000, 63)]
[InlineData(-2147483648, 5000, 63)]
public static void CheckGetOrderablePartitions(int from, int count, int dop)
{
int to = from + count;
var partitioner = Partitioner.Create(from, to);
//var elements = partitioner.GetOrderablePartitions(dop).SelectMany(enumerator => enumerator.UnRoll());
IList<int> elements = new List<int>();
foreach (var partition in partitioner.GetPartitions(dop))
{
foreach (var item in partition.UnRoll())
elements.Add(item);
}
Assert.True(elements.CompareSequences<int>(RangePartitionerHelpers.IntEnumerable(from, to)), "GetOrderablePartitions Element mismatch");
//var keys = partitioner.GetOrderablePartitions(dop).SelectMany(enumerator => enumerator.UnRollIndices()).ToArray();
IList<long> keys = new List<long>();
foreach (var partition in partitioner.GetOrderablePartitions(dop))
{
foreach (var item in partition.UnRollIndices())
keys.Add(item);
}
Assert.True(keys.CompareSequences<long>(RangePartitionerHelpers.LongEnumerable(keys[0], keys.Count)), "GetOrderablePartitions key mismatch");
}
/// <summary>
/// GetOrderableDynamicPartitions returns an IEnumerable<KeyValuePair<long, Tuple<int, int>>
/// We unroll the tuples and flatten them to a single sequence
/// The single sequence is compared to the original range for verification
/// Also the indices are extracted to ensure that they are ordered & normalized
/// </summary>
[Theory]
[InlineData(0, 1)]
[InlineData(1, 1999)]
[InlineData(2147473647, 9999)]
[InlineData(-1999, 5000)]
[InlineData(-2147483648, 5000)]
public static void GetOrderableDynamicPartitions(int from, int count)
{
int to = from + count;
var partitioner = Partitioner.Create(from, to);
//var elements = partitioner.GetOrderableDynamicPartitions().SelectMany(tuple => tuple.UnRoll());
IList<int> elements = new List<int>();
foreach (var partition in partitioner.GetOrderableDynamicPartitions())
{
foreach (var item in partition.UnRoll())
elements.Add(item);
}
Assert.True(elements.CompareSequences<int>(RangePartitionerHelpers.IntEnumerable(from, to)), "GetOrderableDynamicPartitions Element mismatch");
//var keys = partitioner.GetOrderableDynamicPartitions().Select(tuple => tuple.Key).ToArray();
IList<long> keys = new List<long>();
foreach (var tuple in partitioner.GetOrderableDynamicPartitions())
{
keys.Add(tuple.Key);
}
Assert.True(keys.CompareSequences<long>(RangePartitionerHelpers.LongEnumerable(keys[0], keys.Count)), "GetOrderableDynamicPartitions key mismatch");
}
/// <summary>
/// GetPartitions returns an IList<IEnumerator<Tuple<int, int>>
/// We unroll the tuples and flatten them to a single sequence
/// The single sequence is compared to the original range for verification
/// This method tests the partitioner created with user provided desiredRangeSize
/// The range sizes for individual ranges are checked to see if they are equal to
/// desiredRangeSize. The last range may have less than or equal to desiredRangeSize.
/// </summary>
[Theory]
[InlineData(1999, 1000, 20, 1)]
[InlineData(-1999, 1000, 100, 2)]
[InlineData(1999, 1, 2000, 3)]
[InlineData(2147482647, 999, 600, 4)]
[InlineData(-2147483648, 1000, 19, 63)]
public static void CheckGetPartitionsWithRange(int from, int count, int desiredRangeSize, int dop)
{
int to = from + count;
var partitioner = Partitioner.Create(from, to, desiredRangeSize);
//var elements = partitioner.GetPartitions(dop).SelectMany(enumerator => enumerator.UnRoll());
IList<int> elements = new List<int>();
foreach (var partition in partitioner.GetPartitions(dop))
{
foreach (var item in partition.UnRoll())
elements.Add(item);
}
Assert.True(elements.CompareSequences<int>(RangePartitionerHelpers.IntEnumerable(from, to)), "GetPartitions element mismatch");
//var rangeSizes = partitioner.GetPartitions(dop).SelectMany(enumerator => enumerator.GetRangeSize()).ToArray();
IList<int> rangeSizes = new List<int>();
foreach (var partition in partitioner.GetPartitions(dop))
{
foreach (var item in partition.GetRangeSize())
rangeSizes.Add(item);
}
ValidateRangeSize(desiredRangeSize, rangeSizes);
}
/// <summary>
/// CheckGetDynamicPartitionsWithRange returns an IEnumerable<Tuple<int, int>>
/// We unroll the tuples and flatten them to a single sequence
/// The single sequence is compared to the original range for verification
/// This method tests the partitioner created with user provided desiredRangeSize
/// The range sizes for individual ranges are checked to see if they are equal to
/// desiredRangeSize. The last range may have less than or equal to desiredRangeSize.
/// </summary>
[Theory]
[InlineData(1999, 1000, 20)]
[InlineData(-1999, 1000, 100)]
[InlineData(1999, 1, 2000)]
[InlineData(2147482647, 999, 600)]
[InlineData(-2147483648, 1000, 19)]
public static void CheckGetDynamicPartitionsWithRange(int from, int count, int desiredRangeSize)
{
int to = from + count;
var partitioner = Partitioner.Create(from, to, desiredRangeSize);
//var elements = partitioner.GetDynamicPartitions().SelectMany(tuple => tuple.UnRoll());
IList<int> elements = new List<int>();
foreach (var partition in partitioner.GetDynamicPartitions())
{
foreach (var item in partition.UnRoll())
elements.Add(item);
}
Assert.True(elements.CompareSequences<int>(RangePartitionerHelpers.IntEnumerable(from, to)), "GetDynamicPartitions Element mismatch");
//var rangeSizes = partitioner.GetDynamicPartitions().Select(tuple => tuple.GetRangeSize()).ToArray();
IList<int> rangeSizes = new List<int>();
foreach (var partition in partitioner.GetDynamicPartitions())
{
rangeSizes.Add(partition.GetRangeSize());
}
ValidateRangeSize(desiredRangeSize, rangeSizes);
}
/// <summary>
/// GetOrderablePartitions returns an IList<IEnumerator<KeyValuePair<long, Tuple<int, int>>>
/// We unroll the tuples and flatten them to a single sequence
/// The single sequence is compared to the original range for verification
/// Also the indices are extracted to ensure that they are ordered & normalized
/// This method tests the partitioner created with user provided desiredRangeSize
/// The range sizes for individual ranges are checked to see if they are equal to
/// desiredRangeSize. The last range may have less than or equal to desiredRangeSize.
/// </summary>
[Theory]
[InlineData(1999, 1000, 20, 1)]
[InlineData(-1999, 1000, 100, 2)]
[InlineData(1999, 1, 2000, 3)]
[InlineData(2147482647, 999, 600, 4)]
[InlineData(-2147483648, 1000, 19, 63)]
public static void CheckGetOrderablePartitionsWithRange(int from, int count, int desiredRangeSize, int dop)
{
int to = from + count;
var partitioner = Partitioner.Create(from, to, desiredRangeSize);
//var elements = partitioner.GetOrderablePartitions(dop).SelectMany(enumerator => enumerator.UnRoll());
IList<int> elements = new List<int>();
foreach (var partition in partitioner.GetOrderablePartitions(dop))
{
foreach (var item in partition.UnRoll())
elements.Add(item);
}
Assert.True(elements.CompareSequences<int>(RangePartitionerHelpers.IntEnumerable(from, to)), "GetOrderablePartitions Element mismatch");
//var keys = partitioner.GetOrderablePartitions(dop).SelectMany(enumerator => enumerator.UnRollIndices()).ToArray();
IList<long> keys = new List<long>();
foreach (var partition in partitioner.GetOrderablePartitions(dop))
{
foreach (var item in partition.UnRollIndices())
keys.Add(item);
}
Assert.True(keys.CompareSequences<long>(RangePartitionerHelpers.LongEnumerable(keys[0], keys.Count)), "GetOrderablePartitions key mismatch");
//var rangeSizes = partitioner.GetOrderablePartitions(dop).SelectMany(enumerator => enumerator.GetRangeSize()).ToArray();
IList<int> rangeSizes = new List<int>();
foreach (var partition in partitioner.GetOrderablePartitions(dop))
{
foreach (var item in partition.GetRangeSize())
rangeSizes.Add(item);
}
ValidateRangeSize(desiredRangeSize, rangeSizes);
}
/// <summary>
/// GetOrderableDynamicPartitions returns an IEnumerable<KeyValuePair<long, Tuple<int, int>>
/// We unroll the tuples and flatten them to a single sequence
/// The single sequence is compared to the original range for verification
/// Also the indices are extracted to ensure that they are ordered & normalized
/// This method tests the partitioner created with user provided desiredRangeSize
/// The range sizes for individual ranges are checked to see if they are equal to
/// desiredRangeSize. The last range may have less than or equal to desiredRangeSize.
/// </summary>
[Theory]
[InlineData(1999, 1000, 20)]
[InlineData(-1999, 1000, 100)]
[InlineData(1999, 1, 2000)]
[InlineData(2147482647, 999, 600)]
[InlineData(-2147483648, 1000, 19)]
public static void GetOrderableDynamicPartitionsWithRange(int from, int count, int desiredRangeSize)
{
int to = from + count;
var partitioner = Partitioner.Create(from, to, desiredRangeSize);
//var elements = partitioner.GetOrderableDynamicPartitions().SelectMany(tuple => tuple.UnRoll());
IList<int> elements = new List<int>();
foreach (var tuple in partitioner.GetOrderableDynamicPartitions())
{
foreach (var item in tuple.UnRoll())
elements.Add(item);
}
Assert.True(elements.CompareSequences<int>(RangePartitionerHelpers.IntEnumerable(from, to)), "GetOrderableDynamicPartitions Element mismatch");
//var keys = partitioner.GetOrderableDynamicPartitions().Select(tuple => tuple.Key).ToArray();
IList<long> keys = new List<long>();
foreach (var tuple in partitioner.GetOrderableDynamicPartitions())
{
keys.Add(tuple.Key);
}
Assert.True(keys.CompareSequences<long>(RangePartitionerHelpers.LongEnumerable(keys[0], keys.Count)), "GetOrderableDynamicPartitions key mismatch");
//var rangeSizes = partitioner.GetOrderableDynamicPartitions().Select(tuple => tuple.GetRangeSize()).ToArray();
IList<int> rangeSizes = new List<int>();
foreach (var partition in partitioner.GetOrderableDynamicPartitions())
{
rangeSizes.Add(partition.GetRangeSize());
}
ValidateRangeSize(desiredRangeSize, rangeSizes);
}
/// <summary>
/// Helper function to validate the range size of the partitioners match what the user specified
/// (desiredRangeSize).
/// </summary>
/// <param name="desiredRangeSize"></param>
/// <param name="rangeSizes"></param>
private static void ValidateRangeSize(int desiredRangeSize, IList<int> rangeSizes)
{
//var rangesWithDifferentRangeSize = rangeSizes.Take(rangeSizes.Length - 1).Where(r => r != desiredRangeSize).ToArray();
IList<int> rangesWithDifferentRangeSize = new List<int>();
// ensure that every range except the last one has the desired size.
int numToTake = rangeSizes.Count - 1;
for (int i = 0; i < numToTake; i++)
{
int range = rangeSizes[i];
if (range != desiredRangeSize)
rangesWithDifferentRangeSize.Add(range);
}
Assert.Equal(0, rangesWithDifferentRangeSize.Count);
Assert.InRange(rangeSizes[rangeSizes.Count - 1], 0, desiredRangeSize);
}
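// Worked example (added commentary, not part of the original suite): for
// Partitioner.Create(0, 10, 4) the ranges produced are (0, 4), (4, 8), (8, 10) -
// every range has the desired size 4 except the last, which may be smaller.
// That is exactly what ValidateRangeSize asserts above.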
/// <summary>
/// Ensure that the range partitioner doesn't chunk up elements i.e. uses chunk size = 1
/// </summary>
/// <param name="from"></param>
/// <param name="count"></param>
/// <param name="rangeSize"></param>
[Theory]
[InlineData(1999, 1000, 10)]
[InlineData(89, 17823, -1)]
public static void RangePartitionerChunking(int from, int count, int rangeSize)
{
int to = from + count;
var partitioner = (rangeSize == -1) ? Partitioner.Create(from, to) : Partitioner.Create(from, to, rangeSize);
// Check static partitions
var partitions = partitioner.GetPartitions(2);
// Initialize the from / to values from the first element
if (!partitions[0].MoveNext()) return;
Assert.Equal(from, partitions[0].Current.Item1);
if (rangeSize == -1)
{
rangeSize = partitions[0].Current.Item2 - partitions[0].Current.Item1;
}
int nextExpectedFrom = partitions[0].Current.Item2;
int nextExpectedTo = (nextExpectedFrom + rangeSize) > to ? to : (nextExpectedFrom + rangeSize);
// Ensure that each partition gets one range only
// we check this by alternating partitions asking for elements and make sure
// that we get ranges in a sequence. If chunking were to happen then we wouldn't see a sequence
int actualCount = partitions[0].Current.Item2 - partitions[0].Current.Item1;
while (true)
{
if (!partitions[0].MoveNext()) break;
Assert.Equal(nextExpectedFrom, partitions[0].Current.Item1);
Assert.Equal(nextExpectedTo, partitions[0].Current.Item2);
nextExpectedFrom = (nextExpectedFrom + rangeSize) > to ? to : (nextExpectedFrom + rangeSize);
nextExpectedTo = (nextExpectedTo + rangeSize) > to ? to : (nextExpectedTo + rangeSize);
actualCount += partitions[0].Current.Item2 - partitions[0].Current.Item1;
if (!partitions[1].MoveNext()) break;
Assert.Equal(nextExpectedFrom, partitions[1].Current.Item1);
Assert.Equal(nextExpectedTo, partitions[1].Current.Item2);
nextExpectedFrom = (nextExpectedFrom + rangeSize) > to ? to : (nextExpectedFrom + rangeSize);
nextExpectedTo = (nextExpectedTo + rangeSize) > to ? to : (nextExpectedTo + rangeSize);
actualCount += partitions[1].Current.Item2 - partitions[1].Current.Item1;
if (!partitions[1].MoveNext()) break;
Assert.Equal(nextExpectedFrom, partitions[1].Current.Item1);
Assert.Equal(nextExpectedTo, partitions[1].Current.Item2);
nextExpectedFrom = (nextExpectedFrom + rangeSize) > to ? to : (nextExpectedFrom + rangeSize);
nextExpectedTo = (nextExpectedTo + rangeSize) > to ? to : (nextExpectedTo + rangeSize);
actualCount += partitions[1].Current.Item2 - partitions[1].Current.Item1;
if (!partitions[0].MoveNext()) break;
Assert.Equal(nextExpectedFrom, partitions[0].Current.Item1);
Assert.Equal(nextExpectedTo, partitions[0].Current.Item2);
nextExpectedFrom = (nextExpectedFrom + rangeSize) > to ? to : (nextExpectedFrom + rangeSize);
nextExpectedTo = (nextExpectedTo + rangeSize) > to ? to : (nextExpectedTo + rangeSize);
actualCount += partitions[0].Current.Item2 - partitions[0].Current.Item1;
}
// Verifying that all items are there
Assert.Equal(count, actualCount);
}
/// <summary>
/// Ensure that the range partitioner doesn't chunk up elements i.e. uses chunk size = 1
/// </summary>
/// <param name="from"></param>
/// <param name="count"></param>
/// <param name="rangeSize"></param>
[Theory]
[InlineData(1999, 1000, 10)]
[InlineData(1, 884354, -1)]
public static void RangePartitionerDynamicChunking(int from, int count, int rangeSize)
{
int to = from + count;
var partitioner = (rangeSize == -1) ? Partitioner.Create(from, to) : Partitioner.Create(from, to, rangeSize);
// Check static partitions
var partitions = partitioner.GetDynamicPartitions();
var partition1 = partitions.GetEnumerator();
var partition2 = partitions.GetEnumerator();
// Initialize the from / to values from the first element
if (!partition1.MoveNext()) return;
Assert.True(from == partition1.Current.Item1);
if (rangeSize == -1)
{
rangeSize = partition1.Current.Item2 - partition1.Current.Item1;
}
int nextExpectedFrom = partition1.Current.Item2;
int nextExpectedTo = (nextExpectedFrom + rangeSize) > to ? to : (nextExpectedFrom + rangeSize);
// Ensure that each partition gets one range only
// we check this by alternating partitions asking for elements and make sure
// that we get ranges in a sequence. If chunking were to happen then we wouldn't see a sequence
int actualCount = partition1.Current.Item2 - partition1.Current.Item1;
while (true)
{
if (!partition1.MoveNext()) break;
Assert.Equal(nextExpectedFrom, partition1.Current.Item1);
Assert.Equal(nextExpectedTo, partition1.Current.Item2);
nextExpectedFrom = (nextExpectedFrom + rangeSize) > to ? to : (nextExpectedFrom + rangeSize);
nextExpectedTo = (nextExpectedTo + rangeSize) > to ? to : (nextExpectedTo + rangeSize);
actualCount += partition1.Current.Item2 - partition1.Current.Item1;
if (!partition2.MoveNext()) break;
Assert.Equal(nextExpectedFrom, partition2.Current.Item1);
Assert.Equal(nextExpectedTo, partition2.Current.Item2);
nextExpectedFrom = (nextExpectedFrom + rangeSize) > to ? to : (nextExpectedFrom + rangeSize);
nextExpectedTo = (nextExpectedTo + rangeSize) > to ? to : (nextExpectedTo + rangeSize);
actualCount += partition2.Current.Item2 - partition2.Current.Item1;
if (!partition2.MoveNext()) break;
Assert.Equal(nextExpectedFrom, partition2.Current.Item1);
Assert.Equal(nextExpectedTo, partition2.Current.Item2);
nextExpectedFrom = (nextExpectedFrom + rangeSize) > to ? to : (nextExpectedFrom + rangeSize);
nextExpectedTo = (nextExpectedTo + rangeSize) > to ? to : (nextExpectedTo + rangeSize);
actualCount += partition2.Current.Item2 - partition2.Current.Item1;
if (!partition1.MoveNext()) break;
Assert.Equal(nextExpectedFrom, partition1.Current.Item1);
Assert.Equal(nextExpectedTo, partition1.Current.Item2);
nextExpectedFrom = (nextExpectedFrom + rangeSize) > to ? to : (nextExpectedFrom + rangeSize);
nextExpectedTo = (nextExpectedTo + rangeSize) > to ? to : (nextExpectedTo + rangeSize);
actualCount += partition1.Current.Item2 - partition1.Current.Item1;
}
// Verifying that all items are there
Assert.Equal(count, actualCount);
}
/// <summary>
/// Ensure that the range partitioner doesn't exceed the exclusive bound
/// </summary>
/// <param name="fromInclusive"></param>
/// <param name="toExclusive"></param>
[Theory]
[InlineData(-1, int.MaxValue)]
[InlineData(int.MinValue, int.MaxValue)]
[InlineData(int.MinValue, -1)]
[InlineData(int.MinValue / 2, int.MaxValue / 2)]
public void TestPartitionerCreate(int fromInclusive, int toExclusive)
{
OrderablePartitioner<Tuple<int, int>> op = Partitioner.Create(fromInclusive, toExclusive);
int start = fromInclusive;
foreach (var p in op.GetDynamicPartitions())
{
Assert.Equal(start, p.Item1);
start = p.Item2;
}
Assert.Equal(toExclusive, start);
}
}
}
| // Licensed to the .NET Foundation under one or more agreements.
// The .NET Foundation licenses this file to you under the MIT license.
// =+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+
//
// IntRangePartitionerTests.cs - Tests for range partitioner for integer range.
//
// PLEASE NOTE !! - For tests that need to iterate the elements inside the partitions more
// than once, we need to call GetPartitions for the second time. Iterating a second time
// over the first enumerable<tuples> / IList<IEnumerator<tuples> will yield no elements
//
// PLEASE NOTE!! - we use lazy evaluation wherever possible to allow for more than Int32.MaxValue
// elements. ToArray / ToList will result in an OOM
//
// Taken from:
// \qa\clr\testsrc\pfx\Functional\Common\Partitioner\YetiTests\RangePartitioner\IntRangePartitionerTests.cs
// =-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-
using System;
using System.Collections;
using System.Collections.Concurrent;
using System.Collections.Generic;
using Xunit;
namespace System.Collections.Concurrent.Tests
{
public class IntRangePartitionerTests
{
/// <summary>
/// Ensure that the partitioner returned has properties set correctly
/// </summary>
[Fact]
public static void CheckKeyProperties()
{
var partitioner = Partitioner.Create(0, 1000);
Assert.True(partitioner.KeysOrderedInEachPartition, "Expected KeysOrderedInEachPartition to be set to true");
Assert.False(partitioner.KeysOrderedAcrossPartitions, "Expected KeysOrderedAcrossPartitions to be set to false");
Assert.True(partitioner.KeysNormalized, "Expected KeysNormalized to be set to true");
partitioner = Partitioner.Create(0, 1000, 90);
Assert.True(partitioner.KeysOrderedInEachPartition, "Expected KeysOrderedInEachPartition to be set to true");
Assert.False(partitioner.KeysOrderedAcrossPartitions, "Expected KeysOrderedAcrossPartitions to be set to false");
Assert.True(partitioner.KeysNormalized, "Expected KeysNormalized to be set to true");
}
/// <summary>
/// GetPartitions returns an IList<IEnumerator<Tuple<int, int>>
/// We unroll the tuples and flatten them to a single sequence
/// The single sequence is compared to the original range for verification
/// </summary>
[Theory]
[InlineData(0, 1, 1)]
[InlineData(1, 1999, 3)]
[InlineData(2147473647, 9999, 4)]
[InlineData(-1999, 5000, 63)]
[InlineData(-2147483648, 5000, 63)]
public static void CheckGetPartitions(int from, int count, int dop)
{
int to = from + count;
var partitioner = Partitioner.Create(from, to);
//var elements = dopPartitions.SelectMany(enumerator => enumerator.UnRoll());
IList<int> elements = new List<int>();
foreach (var partition in partitioner.GetPartitions(dop))
{
foreach (var item in partition.UnRoll())
elements.Add(item);
}
Assert.True(elements.CompareSequences<int>(RangePartitionerHelpers.IntEnumerable(from, to)), "GetPartitions element mismatch");
}
/// <summary>
/// CheckGetDynamicPartitions returns an IEnumerable<Tuple<int, int>>
/// We unroll the tuples and flatten them to a single sequence
/// The single sequence is compared to the original range for verification
/// </summary>
/// <param name="from"></param>
/// <param name="count"></param>
[Theory]
[InlineData(0, 1)]
[InlineData(1, 1999)]
[InlineData(2147473647, 9999)]
[InlineData(-1999, 5000)]
[InlineData(-2147483648, 5000)]
public static void CheckGetDynamicPartitions(int from, int count)
{
int to = from + count;
var partitioner = Partitioner.Create(from, to);
//var elements = partitioner.GetDynamicPartitions().SelectMany(tuple => tuple.UnRoll());
IList<int> elements = new List<int>();
foreach (var partition in partitioner.GetDynamicPartitions())
{
foreach (var item in partition.UnRoll())
elements.Add(item);
}
Assert.True(elements.CompareSequences<int>(RangePartitionerHelpers.IntEnumerable(from, to)), "GetDynamicPartitions Element mismatch");
}
/// <summary>
/// GetOrderablePartitions returns an IList<IEnumerator<KeyValuePair<long, Tuple<int, int>>>
/// We unroll the tuples and flatten them to a single sequence
/// The single sequence is compared to the original range for verification
/// Also the indices are extracted to ensure that they are ordered & normalized
/// </summary>
[Theory]
[InlineData(0, 1, 1)]
[InlineData(1, 1999, 3)]
[InlineData(2147473647, 9999, 4)]
[InlineData(-1999, 5000, 63)]
[InlineData(-2147483648, 5000, 63)]
public static void CheckGetOrderablePartitions(int from, int count, int dop)
{
int to = from + count;
var partitioner = Partitioner.Create(from, to);
//var elements = partitioner.GetOrderablePartitions(dop).SelectMany(enumerator => enumerator.UnRoll());
IList<int> elements = new List<int>();
foreach (var partition in partitioner.GetPartitions(dop))
{
foreach (var item in partition.UnRoll())
elements.Add(item);
}
Assert.True(elements.CompareSequences<int>(RangePartitionerHelpers.IntEnumerable(from, to)), "GetOrderablePartitions Element mismatch");
//var keys = partitioner.GetOrderablePartitions(dop).SelectMany(enumerator => enumerator.UnRollIndices()).ToArray();
IList<long> keys = new List<long>();
foreach (var partition in partitioner.GetOrderablePartitions(dop))
{
foreach (var item in partition.UnRollIndices())
keys.Add(item);
}
Assert.True(keys.CompareSequences<long>(RangePartitionerHelpers.LongEnumerable(keys[0], keys.Count)), "GetOrderablePartitions key mismatch");
}
/// <summary>
/// GetOrderableDynamicPartitions returns an IEnumerable<KeyValuePair<long, Tuple<int, int>>
/// We unroll the tuples and flatten them to a single sequence
/// The single sequence is compared to the original range for verification
/// Also the indices are extracted to ensure that they are ordered & normalized
/// </summary>
[Theory]
[InlineData(0, 1)]
[InlineData(1, 1999)]
[InlineData(2147473647, 9999)]
[InlineData(-1999, 5000)]
[InlineData(-2147483648, 5000)]
public static void GetOrderableDynamicPartitions(int from, int count)
{
int to = from + count;
var partitioner = Partitioner.Create(from, to);
//var elements = partitioner.GetOrderableDynamicPartitions().SelectMany(tuple => tuple.UnRoll());
IList<int> elements = new List<int>();
foreach (var partition in partitioner.GetOrderableDynamicPartitions())
{
foreach (var item in partition.UnRoll())
elements.Add(item);
}
Assert.True(elements.CompareSequences<int>(RangePartitionerHelpers.IntEnumerable(from, to)), "GetOrderableDynamicPartitions Element mismatch");
//var keys = partitioner.GetOrderableDynamicPartitions().Select(tuple => tuple.Key).ToArray();
IList<long> keys = new List<long>();
foreach (var tuple in partitioner.GetOrderableDynamicPartitions())
{
keys.Add(tuple.Key);
}
Assert.True(keys.CompareSequences<long>(RangePartitionerHelpers.LongEnumerable(keys[0], keys.Count)), "GetOrderableDynamicPartitions key mismatch");
}
/// <summary>
/// GetPartitions returns an IList<IEnumerator<Tuple<int, int>>
/// We unroll the tuples and flatten them to a single sequence
/// The single sequence is compared to the original range for verification
/// This method tests the partitioner created with user provided desiredRangeSize
/// The range sizes for individual ranges are checked to see if they are equal to
/// desiredRangeSize. The last range may have less than or equal to desiredRangeSize.
/// </summary>
[Theory]
[InlineData(1999, 1000, 20, 1)]
[InlineData(-1999, 1000, 100, 2)]
[InlineData(1999, 1, 2000, 3)]
[InlineData(2147482647, 999, 600, 4)]
[InlineData(-2147483648, 1000, 19, 63)]
public static void CheckGetPartitionsWithRange(int from, int count, int desiredRangeSize, int dop)
{
int to = from + count;
var partitioner = Partitioner.Create(from, to, desiredRangeSize);
//var elements = partitioner.GetPartitions(dop).SelectMany(enumerator => enumerator.UnRoll());
IList<int> elements = new List<int>();
foreach (var partition in partitioner.GetPartitions(dop))
{
foreach (var item in partition.UnRoll())
elements.Add(item);
}
Assert.True(elements.CompareSequences<int>(RangePartitionerHelpers.IntEnumerable(from, to)), "GetPartitions element mismatch");
//var rangeSizes = partitioner.GetPartitions(dop).SelectMany(enumerator => enumerator.GetRangeSize()).ToArray();
IList<int> rangeSizes = new List<int>();
foreach (var partition in partitioner.GetPartitions(dop))
{
foreach (var item in partition.GetRangeSize())
rangeSizes.Add(item);
}
ValidateRangeSize(desiredRangeSize, rangeSizes);
}
/// <summary>
/// CheckGetDynamicPartitionsWithRange returns an IEnumerable<Tuple<int, int>>
/// We unroll the tuples and flatten them to a single sequence
/// The single sequence is compared to the original range for verification
/// This method tests the partitioner created with user provided desiredRangeSize
/// The range sizes for individual ranges are checked to see if they are equal to
/// desiredRangeSize. The last range may have less than or equal to desiredRangeSize.
/// </summary>
[Theory]
[InlineData(1999, 1000, 20)]
[InlineData(-1999, 1000, 100)]
[InlineData(1999, 1, 2000)]
[InlineData(2147482647, 999, 600)]
[InlineData(-2147483648, 1000, 19)]
public static void CheckGetDynamicPartitionsWithRange(int from, int count, int desiredRangeSize)
{
int to = from + count;
var partitioner = Partitioner.Create(from, to, desiredRangeSize);
//var elements = partitioner.GetDynamicPartitions().SelectMany(tuple => tuple.UnRoll());
IList<int> elements = new List<int>();
foreach (var partition in partitioner.GetDynamicPartitions())
{
foreach (var item in partition.UnRoll())
elements.Add(item);
}
Assert.True(elements.CompareSequences<int>(RangePartitionerHelpers.IntEnumerable(from, to)), "GetDynamicPartitions Element mismatch");
//var rangeSizes = partitioner.GetDynamicPartitions().Select(tuple => tuple.GetRangeSize()).ToArray();
IList<int> rangeSizes = new List<int>();
foreach (var partition in partitioner.GetDynamicPartitions())
{
rangeSizes.Add(partition.GetRangeSize());
}
ValidateRangeSize(desiredRangeSize, rangeSizes);
}
/// <summary>
/// GetOrderablePartitions returns an IList<IEnumerator<KeyValuePair<long, Tuple<int, int>>>
/// We unroll the tuples and flatten them to a single sequence
/// The single sequence is compared to the original range for verification
/// Also the indices are extracted to ensure that they are ordered & normalized
/// This method tests the partitioner created with user provided desiredRangeSize
/// The range sizes for individual ranges are checked to see if they are equal to
/// desiredRangeSize. The last range may have less than or equal to desiredRangeSize.
/// </summary>
[Theory]
[InlineData(1999, 1000, 20, 1)]
[InlineData(-1999, 1000, 100, 2)]
[InlineData(1999, 1, 2000, 3)]
[InlineData(2147482647, 999, 600, 4)]
[InlineData(-2147483648, 1000, 19, 63)]
public static void CheckGetOrderablePartitionsWithRange(int from, int count, int desiredRangeSize, int dop)
{
int to = from + count;
var partitioner = Partitioner.Create(from, to, desiredRangeSize);
//var elements = partitioner.GetOrderablePartitions(dop).SelectMany(enumerator => enumerator.UnRoll());
IList<int> elements = new List<int>();
foreach (var partition in partitioner.GetOrderablePartitions(dop))
{
foreach (var item in partition.UnRoll())
elements.Add(item);
}
Assert.True(elements.CompareSequences<int>(RangePartitionerHelpers.IntEnumerable(from, to)), "GetOrderablePartitions Element mismatch");
//var keys = partitioner.GetOrderablePartitions(dop).SelectMany(enumerator => enumerator.UnRollIndices()).ToArray();
IList<long> keys = new List<long>();
foreach (var partition in partitioner.GetOrderablePartitions(dop))
{
foreach (var item in partition.UnRollIndices())
keys.Add(item);
}
Assert.True(keys.CompareSequences<long>(RangePartitionerHelpers.LongEnumerable(keys[0], keys.Count)), "GetOrderablePartitions key mismatch");
//var rangeSizes = partitioner.GetOrderablePartitions(dop).SelectMany(enumerator => enumerator.GetRangeSize()).ToArray();
IList<int> rangeSizes = new List<int>();
foreach (var partition in partitioner.GetOrderablePartitions(dop))
{
foreach (var item in partition.GetRangeSize())
rangeSizes.Add(item);
}
ValidateRangeSize(desiredRangeSize, rangeSizes);
}
/// <summary>
/// GetOrderableDynamicPartitions returns an IEnumerable<KeyValuePair<long, Tuple<int, int>>
/// We unroll the tuples and flatten them to a single sequence
/// The single sequence is compared to the original range for verification
/// Also the indices are extracted to ensure that they are ordered & normalized
/// This method tests the partitioner created with user provided desiredRangeSize
/// The range sizes for individual ranges are checked to see if they are equal to
/// desiredRangeSize. The last range may have less than or equal to desiredRangeSize.
/// </summary>
[Theory]
[InlineData(1999, 1000, 20)]
[InlineData(-1999, 1000, 100)]
[InlineData(1999, 1, 2000)]
[InlineData(2147482647, 999, 600)]
[InlineData(-2147483648, 1000, 19)]
public static void GetOrderableDynamicPartitionsWithRange(int from, int count, int desiredRangeSize)
{
int to = from + count;
var partitioner = Partitioner.Create(from, to, desiredRangeSize);
//var elements = partitioner.GetOrderableDynamicPartitions().SelectMany(tuple => tuple.UnRoll());
IList<int> elements = new List<int>();
foreach (var tuple in partitioner.GetOrderableDynamicPartitions())
{
foreach (var item in tuple.UnRoll())
elements.Add(item);
}
Assert.True(elements.CompareSequences<int>(RangePartitionerHelpers.IntEnumerable(from, to)), "GetOrderableDynamicPartitions Element mismatch");
//var keys = partitioner.GetOrderableDynamicPartitions().Select(tuple => tuple.Key).ToArray();
IList<long> keys = new List<long>();
foreach (var tuple in partitioner.GetOrderableDynamicPartitions())
{
keys.Add(tuple.Key);
}
Assert.True(keys.CompareSequences<long>(RangePartitionerHelpers.LongEnumerable(keys[0], keys.Count)), "GetOrderableDynamicPartitions key mismatch");
//var rangeSizes = partitioner.GetOrderableDynamicPartitions().Select(tuple => tuple.GetRangeSize()).ToArray();
IList<int> rangeSizes = new List<int>();
foreach (var partition in partitioner.GetOrderableDynamicPartitions())
{
rangeSizes.Add(partition.GetRangeSize());
}
ValidateRangeSize(desiredRangeSize, rangeSizes);
}
/// <summary>
/// Helper function to validate the range size of the partitioners match what the user specified
/// (desiredRangeSize).
/// </summary>
/// <param name="desiredRangeSize"></param>
/// <param name="rangeSizes"></param>
private static void ValidateRangeSize(int desiredRangeSize, IList<int> rangeSizes)
{
//var rangesWithDifferentRangeSize = rangeSizes.Take(rangeSizes.Length - 1).Where(r => r != desiredRangeSize).ToArray();
IList<int> rangesWithDifferentRangeSize = new List<int>();
// ensuring that every range except the last one has the desired size.
int numToTake = rangeSizes.Count - 1;
for (int i = 0; i < numToTake; i++)
{
int range = rangeSizes[i];
if (range != desiredRangeSize)
rangesWithDifferentRangeSize.Add(range);
}
Assert.Equal(0, rangesWithDifferentRangeSize.Count);
Assert.InRange(rangeSizes[rangeSizes.Count - 1], 0, desiredRangeSize);
}
/// <summary>
/// Ensure that the range partitioner doesn't chunk up elements i.e. uses chunk size = 1
/// </summary>
/// <param name="from"></param>
/// <param name="count"></param>
/// <param name="rangeSize"></param>
[Theory]
[InlineData(1999, 1000, 10)]
[InlineData(89, 17823, -1)]
public static void RangePartitionerChunking(int from, int count, int rangeSize)
{
int to = from + count;
var partitioner = (rangeSize == -1) ? Partitioner.Create(from, to) : Partitioner.Create(from, to, rangeSize);
// Check static partitions
var partitions = partitioner.GetPartitions(2);
// Initialize the from / to values from the first element
if (!partitions[0].MoveNext()) return;
Assert.Equal(from, partitions[0].Current.Item1);
if (rangeSize == -1)
{
rangeSize = partitions[0].Current.Item2 - partitions[0].Current.Item1;
}
int nextExpectedFrom = partitions[0].Current.Item2;
int nextExpectedTo = (nextExpectedFrom + rangeSize) > to ? to : (nextExpectedFrom + rangeSize);
// Ensure that each partition gets one range only
// we check this by alternating partitions asking for elements and make sure
// that we get ranges in a sequence. If chunking were to happen then we wouldn't see a sequence
int actualCount = partitions[0].Current.Item2 - partitions[0].Current.Item1;
while (true)
{
if (!partitions[0].MoveNext()) break;
Assert.Equal(nextExpectedFrom, partitions[0].Current.Item1);
Assert.Equal(nextExpectedTo, partitions[0].Current.Item2);
nextExpectedFrom = (nextExpectedFrom + rangeSize) > to ? to : (nextExpectedFrom + rangeSize);
nextExpectedTo = (nextExpectedTo + rangeSize) > to ? to : (nextExpectedTo + rangeSize);
actualCount += partitions[0].Current.Item2 - partitions[0].Current.Item1;
if (!partitions[1].MoveNext()) break;
Assert.Equal(nextExpectedFrom, partitions[1].Current.Item1);
Assert.Equal(nextExpectedTo, partitions[1].Current.Item2);
nextExpectedFrom = (nextExpectedFrom + rangeSize) > to ? to : (nextExpectedFrom + rangeSize);
nextExpectedTo = (nextExpectedTo + rangeSize) > to ? to : (nextExpectedTo + rangeSize);
actualCount += partitions[1].Current.Item2 - partitions[1].Current.Item1;
if (!partitions[1].MoveNext()) break;
Assert.Equal(nextExpectedFrom, partitions[1].Current.Item1);
Assert.Equal(nextExpectedTo, partitions[1].Current.Item2);
nextExpectedFrom = (nextExpectedFrom + rangeSize) > to ? to : (nextExpectedFrom + rangeSize);
nextExpectedTo = (nextExpectedTo + rangeSize) > to ? to : (nextExpectedTo + rangeSize);
actualCount += partitions[1].Current.Item2 - partitions[1].Current.Item1;
if (!partitions[0].MoveNext()) break;
Assert.Equal(nextExpectedFrom, partitions[0].Current.Item1);
Assert.Equal(nextExpectedTo, partitions[0].Current.Item2);
nextExpectedFrom = (nextExpectedFrom + rangeSize) > to ? to : (nextExpectedFrom + rangeSize);
nextExpectedTo = (nextExpectedTo + rangeSize) > to ? to : (nextExpectedTo + rangeSize);
actualCount += partitions[0].Current.Item2 - partitions[0].Current.Item1;
}
// Verifying that all items are there
Assert.Equal(count, actualCount);
}
/// <summary>
/// Ensure that the range partitioner doesn't chunk up elements i.e. uses chunk size = 1
/// </summary>
/// <param name="from"></param>
/// <param name="count"></param>
/// <param name="rangeSize"></param>
[Theory]
[InlineData(1999, 1000, 10)]
[InlineData(1, 884354, -1)]
public static void RangePartitionerDynamicChunking(int from, int count, int rangeSize)
{
int to = from + count;
var partitioner = (rangeSize == -1) ? Partitioner.Create(from, to) : Partitioner.Create(from, to, rangeSize);
// Check static partitions
var partitions = partitioner.GetDynamicPartitions();
var partition1 = partitions.GetEnumerator();
var partition2 = partitions.GetEnumerator();
// Initialize the from / to values from the first element
if (!partition1.MoveNext()) return;
Assert.True(from == partition1.Current.Item1);
if (rangeSize == -1)
{
rangeSize = partition1.Current.Item2 - partition1.Current.Item1;
}
int nextExpectedFrom = partition1.Current.Item2;
int nextExpectedTo = (nextExpectedFrom + rangeSize) > to ? to : (nextExpectedFrom + rangeSize);
// Ensure that each partition gets one range only
// we check this by alternating partitions asking for elements and make sure
// that we get ranges in a sequence. If chunking were to happen then we wouldn't see a sequence
int actualCount = partition1.Current.Item2 - partition1.Current.Item1;
while (true)
{
if (!partition1.MoveNext()) break;
Assert.Equal(nextExpectedFrom, partition1.Current.Item1);
Assert.Equal(nextExpectedTo, partition1.Current.Item2);
nextExpectedFrom = (nextExpectedFrom + rangeSize) > to ? to : (nextExpectedFrom + rangeSize);
nextExpectedTo = (nextExpectedTo + rangeSize) > to ? to : (nextExpectedTo + rangeSize);
actualCount += partition1.Current.Item2 - partition1.Current.Item1;
if (!partition2.MoveNext()) break;
Assert.Equal(nextExpectedFrom, partition2.Current.Item1);
Assert.Equal(nextExpectedTo, partition2.Current.Item2);
nextExpectedFrom = (nextExpectedFrom + rangeSize) > to ? to : (nextExpectedFrom + rangeSize);
nextExpectedTo = (nextExpectedTo + rangeSize) > to ? to : (nextExpectedTo + rangeSize);
actualCount += partition2.Current.Item2 - partition2.Current.Item1;
if (!partition2.MoveNext()) break;
Assert.Equal(nextExpectedFrom, partition2.Current.Item1);
Assert.Equal(nextExpectedTo, partition2.Current.Item2);
nextExpectedFrom = (nextExpectedFrom + rangeSize) > to ? to : (nextExpectedFrom + rangeSize);
nextExpectedTo = (nextExpectedTo + rangeSize) > to ? to : (nextExpectedTo + rangeSize);
actualCount += partition2.Current.Item2 - partition2.Current.Item1;
if (!partition1.MoveNext()) break;
Assert.Equal(nextExpectedFrom, partition1.Current.Item1);
Assert.Equal(nextExpectedTo, partition1.Current.Item2);
nextExpectedFrom = (nextExpectedFrom + rangeSize) > to ? to : (nextExpectedFrom + rangeSize);
nextExpectedTo = (nextExpectedTo + rangeSize) > to ? to : (nextExpectedTo + rangeSize);
actualCount += partition1.Current.Item2 - partition1.Current.Item1;
}
// Verifying that all items are there
Assert.Equal(count, actualCount);
}
/// <summary>
/// Ensure that the range partitioner doesn't exceed the exclusive bound
/// </summary>
/// <param name="fromInclusive"></param>
/// <param name="toExclusive"></param>
[Theory]
[InlineData(-1, int.MaxValue)]
[InlineData(int.MinValue, int.MaxValue)]
[InlineData(int.MinValue, -1)]
[InlineData(int.MinValue / 2, int.MaxValue / 2)]
public void TestPartitionerCreate(int fromInclusive, int toExclusive)
{
OrderablePartitioner<Tuple<int, int>> op = Partitioner.Create(fromInclusive, toExclusive);
int start = fromInclusive;
foreach (var p in op.GetDynamicPartitions())
{
Assert.Equal(start, p.Item1);
start = p.Item2;
}
Assert.Equal(toExclusive, start);
}
}
}
| -1 |
dotnet/runtime | 66,248 | Fix issues related to JsonSerializerOptions mutation and caching. | Second attempt at merging https://github.com/dotnet/runtime/pull/65863. Original PR introduced test failures in netfx and was reverted in https://github.com/dotnet/runtime/pull/66235.
cc @jkotas | eiriktsarpalis | 2022-03-05T19:59:48Z | 2022-03-07T19:44:14Z | 44ec3c9474b3a93eb4f71791a43d8bb5c0b08a58 | 8b4eaf94140f488743d0c78caa3afec3b9c5d789 | Fix issues related to JsonSerializerOptions mutation and caching.. Second attempt at merging https://github.com/dotnet/runtime/pull/65863. Original PR introduced test failures in netfx and was reverted in https://github.com/dotnet/runtime/pull/66235.
cc @jkotas | ./src/libraries/System.Data.Common/tests/System/Data/Common/DbDataReaderMock.cs | // Licensed to the .NET Foundation under one or more agreements.
// The .NET Foundation licenses this file to you under the MIT license.
//
// Copyright (C) 2014 Mika Aalto
//
// Permission is hereby granted, free of charge, to any person obtaining
// a copy of this software and associated documentation files (the
// "Software"), to deal in the Software without restriction, including
// without limitation the rights to use, copy, modify, merge, publish,
// distribute, sublicense, and/or sell copies of the Software, and to
// permit persons to whom the Software is furnished to do so, subject to
// the following conditions:
//
// The above copyright notice and this permission notice shall be
// included in all copies or substantial portions of the Software.
//
// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
// EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
// MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
// NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
// LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
// OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
// WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
//
#nullable enable
using System.Collections;
using System.Linq;
using System.Data.Common;
namespace System.Data.Common.Tests
{
internal class DbDataReaderMock : DbDataReader
{
protected int _currentRowIndex = -1;
protected DataTable _testDataTable;
public DbDataReaderMock()
=> _testDataTable = new DataTable();
public DbDataReaderMock(DataTable testData)
=> _testDataTable = testData ?? throw new ArgumentNullException(nameof(testData));
public override void Close() => _testDataTable.Clear();
public override int Depth => throw new NotImplementedException();
public override int FieldCount => throw new NotImplementedException();
public override bool GetBoolean(int ordinal) => (bool)GetValue(ordinal);
public override byte GetByte(int ordinal) => (byte)GetValue(ordinal);
public override long GetBytes(int ordinal, long dataOffset, byte[]? buffer, int bufferOffset, int length)
{
object value = GetValue(ordinal);
if (value == DBNull.Value)
{
return 0;
}
byte[] data = (byte[])value;
if (buffer is null)
{
return data.Length;
}
long bytesToRead = Math.Min(data.Length - dataOffset, length);
Buffer.BlockCopy(data, (int)dataOffset, buffer, bufferOffset, (int)bytesToRead);
return bytesToRead;
}
public override char GetChar(int ordinal) => (char)GetValue(ordinal);
public override long GetChars(int ordinal, long dataOffset, char[]? buffer, int bufferOffset, int length)
{
object value = GetValue(ordinal);
if (value == DBNull.Value)
{
return 0;
}
char[] data = value.ToString()!.ToCharArray();
if (buffer is null)
{
return data.Length;
}
long bytesToRead = Math.Min(data.Length - dataOffset, length);
Array.Copy(data, dataOffset, buffer, bufferOffset, bytesToRead);
return bytesToRead;
}
public override string GetDataTypeName(int ordinal) => throw new NotImplementedException();
public override DateTime GetDateTime(int ordinal) => (DateTime)GetValue(ordinal);
public override decimal GetDecimal(int ordinal) => (decimal)GetValue(ordinal);
public override double GetDouble(int ordinal) => (double)GetValue(ordinal);
public override IEnumerator GetEnumerator() => throw new NotImplementedException();
public override Type GetFieldType(int ordinal) => throw new NotImplementedException();
public override float GetFloat(int ordinal) => (float)GetValue(ordinal);
public override Guid GetGuid(int ordinal) => (Guid)GetValue(ordinal);
public override short GetInt16(int ordinal) => (short)GetValue(ordinal);
public override int GetInt32(int ordinal) => (int)GetValue(ordinal);
public override long GetInt64(int ordinal) => (long)GetValue(ordinal);
public override string GetName(int ordinal) => _testDataTable.Columns[ordinal].ColumnName;
public override int GetOrdinal(string name)
{
// TODO: not efficient; needs to cache the columns
for (var i = 0; i < _testDataTable.Columns.Count; ++i)
{
var columnName = _testDataTable.Columns[i].ColumnName;
if (columnName.Equals(name, StringComparison.OrdinalIgnoreCase))
{
return i;
}
}
return -1;
}
public override string GetString(int ordinal) => (string)_testDataTable.Rows[_currentRowIndex][ordinal];
public override object GetValue(int ordinal) => _testDataTable.Rows[_currentRowIndex][ordinal];
public override int GetValues(object[] values) => throw new NotImplementedException();
public override bool HasRows => throw new NotImplementedException();
public override bool IsClosed => throw new NotImplementedException();
public override bool IsDBNull(int ordinal) => _testDataTable.Rows[_currentRowIndex][ordinal] == DBNull.Value;
public override bool NextResult() => throw new NotImplementedException();
public override bool Read()
{
_currentRowIndex++;
return _currentRowIndex < _testDataTable.Rows.Count;
}
public override int RecordsAffected => throw new NotImplementedException();
public override object this[string name] => throw new NotImplementedException();
public override object this[int ordinal] => throw new NotImplementedException();
}
internal class SchemaDbDataReaderMock : DbDataReaderMock
{
public SchemaDbDataReaderMock(DataTable testData) : base(testData) {}
public override DataTable GetSchemaTable()
{
var table = new DataTable("SchemaTable");
table.Columns.Add("ColumnName", typeof(string));
table.Columns.Add("DataType", typeof(Type));
foreach (var column in _testDataTable.Columns.Cast<DataColumn>())
{
var row = table.NewRow();
row["ColumnName"] = column.ColumnName;
row["DataType"] = column.DataType;
table.Rows.Add(row);
}
return table;
}
}
}
| // Licensed to the .NET Foundation under one or more agreements.
// The .NET Foundation licenses this file to you under the MIT license.
//
// Copyright (C) 2014 Mika Aalto
//
// Permission is hereby granted, free of charge, to any person obtaining
// a copy of this software and associated documentation files (the
// "Software"), to deal in the Software without restriction, including
// without limitation the rights to use, copy, modify, merge, publish,
// distribute, sublicense, and/or sell copies of the Software, and to
// permit persons to whom the Software is furnished to do so, subject to
// the following conditions:
//
// The above copyright notice and this permission notice shall be
// included in all copies or substantial portions of the Software.
//
// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
// EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
// MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
// NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
// LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
// OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
// WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
//
#nullable enable
using System.Collections;
using System.Linq;
using System.Data.Common;
namespace System.Data.Common.Tests
{
internal class DbDataReaderMock : DbDataReader
{
protected int _currentRowIndex = -1;
protected DataTable _testDataTable;
public DbDataReaderMock()
=> _testDataTable = new DataTable();
public DbDataReaderMock(DataTable testData)
=> _testDataTable = testData ?? throw new ArgumentNullException(nameof(testData));
public override void Close() => _testDataTable.Clear();
public override int Depth => throw new NotImplementedException();
public override int FieldCount => throw new NotImplementedException();
public override bool GetBoolean(int ordinal) => (bool)GetValue(ordinal);
public override byte GetByte(int ordinal) => (byte)GetValue(ordinal);
public override long GetBytes(int ordinal, long dataOffset, byte[]? buffer, int bufferOffset, int length)
{
object value = GetValue(ordinal);
if (value == DBNull.Value)
{
return 0;
}
byte[] data = (byte[])value;
if (buffer is null)
{
return data.Length;
}
long bytesToRead = Math.Min(data.Length - dataOffset, length);
Buffer.BlockCopy(data, (int)dataOffset, buffer, bufferOffset, (int)bytesToRead);
return bytesToRead;
}
public override char GetChar(int ordinal) => (char)GetValue(ordinal);
public override long GetChars(int ordinal, long dataOffset, char[]? buffer, int bufferOffset, int length)
{
object value = GetValue(ordinal);
if (value == DBNull.Value)
{
return 0;
}
char[] data = value.ToString()!.ToCharArray();
if (buffer is null)
{
return data.Length;
}
long bytesToRead = Math.Min(data.Length - dataOffset, length);
Array.Copy(data, dataOffset, buffer, bufferOffset, bytesToRead);
return bytesToRead;
}
public override string GetDataTypeName(int ordinal) => throw new NotImplementedException();
public override DateTime GetDateTime(int ordinal) => (DateTime)GetValue(ordinal);
public override decimal GetDecimal(int ordinal) => (decimal)GetValue(ordinal);
public override double GetDouble(int ordinal) => (double)GetValue(ordinal);
public override IEnumerator GetEnumerator() => throw new NotImplementedException();
public override Type GetFieldType(int ordinal) => throw new NotImplementedException();
public override float GetFloat(int ordinal) => (float)GetValue(ordinal);
public override Guid GetGuid(int ordinal) => (Guid)GetValue(ordinal);
public override short GetInt16(int ordinal) => (short)GetValue(ordinal);
public override int GetInt32(int ordinal) => (int)GetValue(ordinal);
public override long GetInt64(int ordinal) => (long)GetValue(ordinal);
public override string GetName(int ordinal) => _testDataTable.Columns[ordinal].ColumnName;
public override int GetOrdinal(string name)
{
// TODO: not efficient; needs to cache the columns
for (var i = 0; i < _testDataTable.Columns.Count; ++i)
{
var columnName = _testDataTable.Columns[i].ColumnName;
if (columnName.Equals(name, StringComparison.OrdinalIgnoreCase))
{
return i;
}
}
return -1;
}
public override string GetString(int ordinal) => (string)_testDataTable.Rows[_currentRowIndex][ordinal];
public override object GetValue(int ordinal) => _testDataTable.Rows[_currentRowIndex][ordinal];
public override int GetValues(object[] values) => throw new NotImplementedException();
public override bool HasRows => throw new NotImplementedException();
public override bool IsClosed => throw new NotImplementedException();
public override bool IsDBNull(int ordinal) => _testDataTable.Rows[_currentRowIndex][ordinal] == DBNull.Value;
public override bool NextResult() => throw new NotImplementedException();
public override bool Read()
{
_currentRowIndex++;
return _currentRowIndex < _testDataTable.Rows.Count;
}
public override int RecordsAffected => throw new NotImplementedException();
public override object this[string name] => throw new NotImplementedException();
public override object this[int ordinal] => throw new NotImplementedException();
}
internal class SchemaDbDataReaderMock : DbDataReaderMock
{
public SchemaDbDataReaderMock(DataTable testData) : base(testData) {}
public override DataTable GetSchemaTable()
{
var table = new DataTable("SchemaTable");
table.Columns.Add("ColumnName", typeof(string));
table.Columns.Add("DataType", typeof(Type));
foreach (var column in _testDataTable.Columns.Cast<DataColumn>())
{
var row = table.NewRow();
row["ColumnName"] = column.ColumnName;
row["DataType"] = column.DataType;
table.Rows.Add(row);
}
return table;
}
}
}
| -1 |
dotnet/runtime | 66,248 | Fix issues related to JsonSerializerOptions mutation and caching. | Second attempt at merging https://github.com/dotnet/runtime/pull/65863. Original PR introduced test failures in netfx and was reverted in https://github.com/dotnet/runtime/pull/66235.
cc @jkotas | eiriktsarpalis | 2022-03-05T19:59:48Z | 2022-03-07T19:44:14Z | 44ec3c9474b3a93eb4f71791a43d8bb5c0b08a58 | 8b4eaf94140f488743d0c78caa3afec3b9c5d789 | Fix issues related to JsonSerializerOptions mutation and caching.. Second attempt at merging https://github.com/dotnet/runtime/pull/65863. Original PR introduced test failures in netfx and was reverted in https://github.com/dotnet/runtime/pull/66235.
cc @jkotas | ./src/libraries/System.Private.Xml/tests/Writers/RwFactory/CXmlDriverVariation.cs | // Licensed to the .NET Foundation under one or more agreements.
// The .NET Foundation licenses this file to you under the MIT license.
using OLEDB.Test.ModuleCore;
namespace System.Xml.Tests
{
/// <summary>
/// CXmlDriverVariation
/// </summary>
public class CXmlDriverVariation : CVariation
{
private CXmlDriverParam _xmlDriverParams;
//Constructor
internal CXmlDriverVariation(CXmlDriverScenario testCase,
string name, string description, int id, int pri,
CXmlDriverParam xmlDriverParams) : base(testCase)
{
_xmlDriverParams = xmlDriverParams;
// use name as a description if provided
if (name != null)
this.Desc = name;
else
this.Desc = description;
this.Name = name;
this.Pri = pri;
this.id = id;
}
private bool CheckSkipped()
{
string skipped = XmlDriverParam.GetTopLevelAttributeValue("Skipped");
if (skipped == null || !bool.Parse(skipped))
return true;
return false;
}
public override tagVARIATION_STATUS Execute()
{
tagVARIATION_STATUS res = (tagVARIATION_STATUS)TEST_FAIL;
try
{
if (!CheckSkipped()) return (tagVARIATION_STATUS)TEST_SKIPPED;
CXmlDriverScenario scenario = (CXmlDriverScenario)Parent;
res = (tagVARIATION_STATUS)scenario.ExecuteVariation(XmlDriverParam);
}
catch (CTestSkippedException e)
{
res = (tagVARIATION_STATUS)HandleException(e);
}
catch (Exception e)
{
res = (tagVARIATION_STATUS)HandleException(e);
}
return res;
}
public CXmlDriverParam XmlDriverParam { get { return _xmlDriverParams; } set { _xmlDriverParams = value; } }
}
}
| // Licensed to the .NET Foundation under one or more agreements.
// The .NET Foundation licenses this file to you under the MIT license.
using OLEDB.Test.ModuleCore;
namespace System.Xml.Tests
{
/// <summary>
/// CXmlDriverVariation
/// </summary>
public class CXmlDriverVariation : CVariation
{
private CXmlDriverParam _xmlDriverParams;
//Constructor
internal CXmlDriverVariation(CXmlDriverScenario testCase,
string name, string description, int id, int pri,
CXmlDriverParam xmlDriverParams) : base(testCase)
{
_xmlDriverParams = xmlDriverParams;
// use name as a description if provided
if (name != null)
this.Desc = name;
else
this.Desc = description;
this.Name = name;
this.Pri = pri;
this.id = id;
}
private bool CheckSkipped()
{
string skipped = XmlDriverParam.GetTopLevelAttributeValue("Skipped");
if (skipped == null || !bool.Parse(skipped))
return true;
return false;
}
public override tagVARIATION_STATUS Execute()
{
tagVARIATION_STATUS res = (tagVARIATION_STATUS)TEST_FAIL;
try
{
if (!CheckSkipped()) return (tagVARIATION_STATUS)TEST_SKIPPED;
CXmlDriverScenario scenario = (CXmlDriverScenario)Parent;
res = (tagVARIATION_STATUS)scenario.ExecuteVariation(XmlDriverParam);
}
catch (CTestSkippedException e)
{
res = (tagVARIATION_STATUS)HandleException(e);
}
catch (Exception e)
{
res = (tagVARIATION_STATUS)HandleException(e);
}
return res;
}
public CXmlDriverParam XmlDriverParam { get { return _xmlDriverParams; } set { _xmlDriverParams = value; } }
}
}
| -1 |
dotnet/runtime | 66,248 | Fix issues related to JsonSerializerOptions mutation and caching. | Second attempt at merging https://github.com/dotnet/runtime/pull/65863. Original PR introduced test failures in netfx and was reverted in https://github.com/dotnet/runtime/pull/66235.
cc @jkotas | eiriktsarpalis | 2022-03-05T19:59:48Z | 2022-03-07T19:44:14Z | 44ec3c9474b3a93eb4f71791a43d8bb5c0b08a58 | 8b4eaf94140f488743d0c78caa3afec3b9c5d789 | Fix issues related to JsonSerializerOptions mutation and caching.. Second attempt at merging https://github.com/dotnet/runtime/pull/65863. Original PR introduced test failures in netfx and was reverted in https://github.com/dotnet/runtime/pull/66235.
cc @jkotas | ./src/tests/Loader/classloader/TypeGeneratorTests/TypeGeneratorTest1313/Generated1313.ilproj | <Project Sdk="Microsoft.NET.Sdk.IL">
<PropertyGroup>
<CLRTestPriority>1</CLRTestPriority>
</PropertyGroup>
<ItemGroup>
<Compile Include="Generated1313.il" />
</ItemGroup>
<ItemGroup>
<ProjectReference Include="..\TestFramework\TestFramework.csproj" />
</ItemGroup>
</Project>
| <Project Sdk="Microsoft.NET.Sdk.IL">
<PropertyGroup>
<CLRTestPriority>1</CLRTestPriority>
</PropertyGroup>
<ItemGroup>
<Compile Include="Generated1313.il" />
</ItemGroup>
<ItemGroup>
<ProjectReference Include="..\TestFramework\TestFramework.csproj" />
</ItemGroup>
</Project>
| -1 |
dotnet/runtime | 66,248 | Fix issues related to JsonSerializerOptions mutation and caching. | Second attempt at merging https://github.com/dotnet/runtime/pull/65863. Original PR introduced test failures in netfx and was reverted in https://github.com/dotnet/runtime/pull/66235.
cc @jkotas | eiriktsarpalis | 2022-03-05T19:59:48Z | 2022-03-07T19:44:14Z | 44ec3c9474b3a93eb4f71791a43d8bb5c0b08a58 | 8b4eaf94140f488743d0c78caa3afec3b9c5d789 | Fix issues related to JsonSerializerOptions mutation and caching.. Second attempt at merging https://github.com/dotnet/runtime/pull/65863. Original PR introduced test failures in netfx and was reverted in https://github.com/dotnet/runtime/pull/66235.
cc @jkotas | ./src/coreclr/pal/tests/palsuite/composite/synchronization/nativecriticalsection/mtx_critsect.h | // Licensed to the .NET Foundation under one or more agreements.
// The .NET Foundation licenses this file to you under the MIT license.
#include <pthread.h>
typedef void VOID;
typedef unsigned long DWORD;
typedef long LONG;
typedef unsigned long ULONG;
typedef void* HANDLE;
typedef unsigned long ULONG_PTR;
#define FALSE 0
#define TRUE 1
#define CSBIT_CS_IS_LOCKED 1
#define CSBIT_NEW_WAITER 2
typedef enum CsInitState { CS_NOT_INIZIALIZED, CS_INITIALIZED, CS_FULLY_INITIALIZED } CsInitState;
typedef enum _CsWaiterReturnState { CS_WAITER_WOKEN_UP, CS_WAITER_DIDNT_WAIT } CsWaiterReturnState;
typedef struct _CRITICAL_SECTION_DEBUG_INFO {
LONG volatile ContentionCount;
LONG volatile InternalContentionCount;
ULONG volatile AcquireCount;
ULONG volatile EnterCount;
} CRITICAL_SECTION_DEBUG_INFO, *PCRITICAL_SECTION_DEBUG_INFO;
typedef struct _CRITICAL_SECTION_NATIVE_DATA {
pthread_mutex_t Mutex;
} CRITICAL_SECTION_NATIVE_DATA, *PCRITICAL_SECTION_NATIVE_DATA;
typedef struct _CRITICAL_SECTION {
CsInitState InitCount;
PCRITICAL_SECTION_DEBUG_INFO DebugInfo;
LONG LockCount;
LONG RecursionCount;
HANDLE OwningThread;
HANDLE LockSemaphore;
ULONG_PTR SpinCount;
CRITICAL_SECTION_NATIVE_DATA NativeData;
} CRITICAL_SECTION, *PCRITICAL_SECTION, *LPCRITICAL_SECTION;
int MTXInitializeCriticalSection(LPCRITICAL_SECTION lpCriticalSection);
int MTXDeleteCriticalSection(LPCRITICAL_SECTION lpCriticalSection);
int MTXEnterCriticalSection(LPCRITICAL_SECTION lpCriticalSection);
int MTXLeaveCriticalSection(LPCRITICAL_SECTION lpCriticalSection);
| // Licensed to the .NET Foundation under one or more agreements.
// The .NET Foundation licenses this file to you under the MIT license.
#include <pthread.h>
typedef void VOID;
typedef unsigned long DWORD;
typedef long LONG;
typedef unsigned long ULONG;
typedef void* HANDLE;
typedef unsigned long ULONG_PTR;
#define FALSE 0
#define TRUE 1
#define CSBIT_CS_IS_LOCKED 1
#define CSBIT_NEW_WAITER 2
typedef enum CsInitState { CS_NOT_INIZIALIZED, CS_INITIALIZED, CS_FULLY_INITIALIZED } CsInitState;
typedef enum _CsWaiterReturnState { CS_WAITER_WOKEN_UP, CS_WAITER_DIDNT_WAIT } CsWaiterReturnState;
typedef struct _CRITICAL_SECTION_DEBUG_INFO {
LONG volatile ContentionCount;
LONG volatile InternalContentionCount;
ULONG volatile AcquireCount;
ULONG volatile EnterCount;
} CRITICAL_SECTION_DEBUG_INFO, *PCRITICAL_SECTION_DEBUG_INFO;
typedef struct _CRITICAL_SECTION_NATIVE_DATA {
pthread_mutex_t Mutex;
} CRITICAL_SECTION_NATIVE_DATA, *PCRITICAL_SECTION_NATIVE_DATA;
typedef struct _CRITICAL_SECTION {
CsInitState InitCount;
PCRITICAL_SECTION_DEBUG_INFO DebugInfo;
LONG LockCount;
LONG RecursionCount;
HANDLE OwningThread;
HANDLE LockSemaphore;
ULONG_PTR SpinCount;
CRITICAL_SECTION_NATIVE_DATA NativeData;
} CRITICAL_SECTION, *PCRITICAL_SECTION, *LPCRITICAL_SECTION;
int MTXInitializeCriticalSection(LPCRITICAL_SECTION lpCriticalSection);
int MTXDeleteCriticalSection(LPCRITICAL_SECTION lpCriticalSection);
int MTXEnterCriticalSection(LPCRITICAL_SECTION lpCriticalSection);
int MTXLeaveCriticalSection(LPCRITICAL_SECTION lpCriticalSection);
| -1 |
dotnet/runtime | 66,248 | Fix issues related to JsonSerializerOptions mutation and caching. | Second attempt at merging https://github.com/dotnet/runtime/pull/65863. Original PR introduced test failures in netfx and was reverted in https://github.com/dotnet/runtime/pull/66235.
cc @jkotas | eiriktsarpalis | 2022-03-05T19:59:48Z | 2022-03-07T19:44:14Z | 44ec3c9474b3a93eb4f71791a43d8bb5c0b08a58 | 8b4eaf94140f488743d0c78caa3afec3b9c5d789 | Fix issues related to JsonSerializerOptions mutation and caching.. Second attempt at merging https://github.com/dotnet/runtime/pull/65863. Original PR introduced test failures in netfx and was reverted in https://github.com/dotnet/runtime/pull/66235.
cc @jkotas | ./src/tests/Loader/classloader/rmv/il/RMV-2-8-31-two.ilproj | <Project Sdk="Microsoft.NET.Sdk.IL">
<PropertyGroup>
<OutputType>Exe</OutputType>
<CLRTestPriority>1</CLRTestPriority>
</PropertyGroup>
<ItemGroup>
<Compile Include="RMV-2-8-31-two.il" />
</ItemGroup>
</Project>
| <Project Sdk="Microsoft.NET.Sdk.IL">
<PropertyGroup>
<OutputType>Exe</OutputType>
<CLRTestPriority>1</CLRTestPriority>
</PropertyGroup>
<ItemGroup>
<Compile Include="RMV-2-8-31-two.il" />
</ItemGroup>
</Project>
| -1 |
dotnet/runtime | 66,248 | Fix issues related to JsonSerializerOptions mutation and caching. | Second attempt at merging https://github.com/dotnet/runtime/pull/65863. Original PR introduced test failures in netfx and was reverted in https://github.com/dotnet/runtime/pull/66235.
cc @jkotas | eiriktsarpalis | 2022-03-05T19:59:48Z | 2022-03-07T19:44:14Z | 44ec3c9474b3a93eb4f71791a43d8bb5c0b08a58 | 8b4eaf94140f488743d0c78caa3afec3b9c5d789 | Fix issues related to JsonSerializerOptions mutation and caching.. Second attempt at merging https://github.com/dotnet/runtime/pull/65863. Original PR introduced test failures in netfx and was reverted in https://github.com/dotnet/runtime/pull/66235.
cc @jkotas | ./src/libraries/System.Configuration.ConfigurationManager/tests/Mono/PositiveTimeSpanValidatorTest.cs | // Licensed to the .NET Foundation under one or more agreements.
// The .NET Foundation licenses this file to you under the MIT license.
//
// System.Configuration.PositiveTimeSpanValidatorTest.cs - Unit tests
// for System.Configuration.PositiveTimeSpanValidator.
//
// Author:
// Chris Toshok <[email protected]>
//
// Copyright (C) 2005 Novell, Inc (http://www.novell.com)
//
// Permission is hereby granted, free of charge, to any person obtaining
// a copy of this software and associated documentation files (the
// "Software"), to deal in the Software without restriction, including
// without limitation the rights to use, copy, modify, merge, publish,
// distribute, sublicense, and/or sell copies of the Software, and to
// permit persons to whom the Software is furnished to do so, subject to
// the following conditions:
//
// The above copyright notice and this permission notice shall be
// included in all copies or substantial portions of the Software.
//
// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
// EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
// MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
// NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
// LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
// OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
// WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
using System;
using System.Configuration;
using Xunit;
namespace MonoTests.System.Configuration
{
public class PositiveTimeSpanValidatorTest
{
[Fact]
public void CanValidate()
{
PositiveTimeSpanValidator v = new PositiveTimeSpanValidator();
Assert.True(v.CanValidate(typeof(TimeSpan)), "A1");
Assert.False(v.CanValidate(typeof(int)), "A2");
Assert.False(v.CanValidate(typeof(long)), "A3");
}
[Fact]
public void Validate_inRange()
{
PositiveTimeSpanValidator v = new PositiveTimeSpanValidator();
v.Validate(new TimeSpan(7000));
}
[Fact]
public void Validate_fail1()
{
PositiveTimeSpanValidator v = new PositiveTimeSpanValidator();
AssertExtensions.Throws<ArgumentException>(null, () => v.Validate(new TimeSpan(0)));
}
[Fact]
public void Validate_fail2()
{
PositiveTimeSpanValidator v = new PositiveTimeSpanValidator();
AssertExtensions.Throws<ArgumentException>(null, () => v.Validate(new TimeSpan(-10000)));
}
}
}
| // Licensed to the .NET Foundation under one or more agreements.
// The .NET Foundation licenses this file to you under the MIT license.
//
// System.Configuration.PositiveTimeSpanValidatorTest.cs - Unit tests
// for System.Configuration.PositiveTimeSpanValidator.
//
// Author:
// Chris Toshok <[email protected]>
//
// Copyright (C) 2005 Novell, Inc (http://www.novell.com)
//
// Permission is hereby granted, free of charge, to any person obtaining
// a copy of this software and associated documentation files (the
// "Software"), to deal in the Software without restriction, including
// without limitation the rights to use, copy, modify, merge, publish,
// distribute, sublicense, and/or sell copies of the Software, and to
// permit persons to whom the Software is furnished to do so, subject to
// the following conditions:
//
// The above copyright notice and this permission notice shall be
// included in all copies or substantial portions of the Software.
//
// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
// EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
// MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
// NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
// LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
// OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
// WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
using System;
using System.Configuration;
using Xunit;
namespace MonoTests.System.Configuration
{
public class PositiveTimeSpanValidatorTest
{
[Fact]
public void CanValidate()
{
PositiveTimeSpanValidator v = new PositiveTimeSpanValidator();
Assert.True(v.CanValidate(typeof(TimeSpan)), "A1");
Assert.False(v.CanValidate(typeof(int)), "A2");
Assert.False(v.CanValidate(typeof(long)), "A3");
}
[Fact]
public void Validate_inRange()
{
PositiveTimeSpanValidator v = new PositiveTimeSpanValidator();
v.Validate(new TimeSpan(7000));
}
[Fact]
public void Validate_fail1()
{
PositiveTimeSpanValidator v = new PositiveTimeSpanValidator();
AssertExtensions.Throws<ArgumentException>(null, () => v.Validate(new TimeSpan(0)));
}
[Fact]
public void Validate_fail2()
{
PositiveTimeSpanValidator v = new PositiveTimeSpanValidator();
AssertExtensions.Throws<ArgumentException>(null, () => v.Validate(new TimeSpan(-10000)));
}
}
}
| -1 |
dotnet/runtime | 66,248 | Fix issues related to JsonSerializerOptions mutation and caching. | Second attempt at merging https://github.com/dotnet/runtime/pull/65863. Original PR introduced test failures in netfx and was reverted in https://github.com/dotnet/runtime/pull/66235.
cc @jkotas | eiriktsarpalis | 2022-03-05T19:59:48Z | 2022-03-07T19:44:14Z | 44ec3c9474b3a93eb4f71791a43d8bb5c0b08a58 | 8b4eaf94140f488743d0c78caa3afec3b9c5d789 | Fix issues related to JsonSerializerOptions mutation and caching.. Second attempt at merging https://github.com/dotnet/runtime/pull/65863. Original PR introduced test failures in netfx and was reverted in https://github.com/dotnet/runtime/pull/66235.
cc @jkotas | ./src/libraries/System.Security.Cryptography.Primitives/src/System.Security.Cryptography.Primitives.csproj | <Project Sdk="Microsoft.NET.Sdk">
<PropertyGroup>
<AllowUnsafeBlocks>true</AllowUnsafeBlocks>
<IsPartialFacadeAssembly>true</IsPartialFacadeAssembly>
<TargetFramework>$(NetCoreAppCurrent)</TargetFramework>
<Nullable>enable</Nullable>
</PropertyGroup>
<ItemGroup>
</ItemGroup>
<ItemGroup>
<Reference Include="System.Runtime" />
<Reference Include="System.Security.Cryptography" />
</ItemGroup>
</Project>
| <Project Sdk="Microsoft.NET.Sdk">
<PropertyGroup>
<AllowUnsafeBlocks>true</AllowUnsafeBlocks>
<IsPartialFacadeAssembly>true</IsPartialFacadeAssembly>
<TargetFramework>$(NetCoreAppCurrent)</TargetFramework>
<Nullable>enable</Nullable>
</PropertyGroup>
<ItemGroup>
</ItemGroup>
<ItemGroup>
<Reference Include="System.Runtime" />
<Reference Include="System.Security.Cryptography" />
</ItemGroup>
</Project>
| -1 |
dotnet/runtime | 66,248 | Fix issues related to JsonSerializerOptions mutation and caching. | Second attempt at merging https://github.com/dotnet/runtime/pull/65863. Original PR introduced test failures in netfx and was reverted in https://github.com/dotnet/runtime/pull/66235.
cc @jkotas | eiriktsarpalis | 2022-03-05T19:59:48Z | 2022-03-07T19:44:14Z | 44ec3c9474b3a93eb4f71791a43d8bb5c0b08a58 | 8b4eaf94140f488743d0c78caa3afec3b9c5d789 | Fix issues related to JsonSerializerOptions mutation and caching.. Second attempt at merging https://github.com/dotnet/runtime/pull/65863. Original PR introduced test failures in netfx and was reverted in https://github.com/dotnet/runtime/pull/66235.
cc @jkotas | ./src/coreclr/tools/dotnet-pgo/MibcEmitter.cs | // Licensed to the .NET Foundation under one or more agreements.
// The .NET Foundation licenses this file to you under the MIT license.
using Internal.TypeSystem;
using Internal.TypeSystem.Ecma;
using Internal.IL;
using Microsoft.Diagnostics.Tracing;
using Microsoft.Diagnostics.Tracing.Etlx;
using Microsoft.Diagnostics.Tracing.Parsers.Clr;
using System;
using System.Buffers.Binary;
using System.Collections.Generic;
using System.IO;
using System.Reflection;
using System.Text;
using System.Linq;
using System.Diagnostics;
using System.Globalization;
using System.Threading.Tasks;
using System.Reflection.Metadata;
using System.Reflection.Metadata.Ecma335;
using System.IO.Compression;
using Microsoft.Diagnostics.Tracing.Parsers.Kernel;
using System.Diagnostics.CodeAnalysis;
using ILCompiler.Reflection.ReadyToRun;
using Microsoft.Diagnostics.Tools.Pgo;
using Internal.Pgo;
using ILCompiler.IBC;
using ILCompiler;
namespace Microsoft.Diagnostics.Tools.Pgo
{
static class MibcEmitter
{
class MIbcGroup : IPgoEncodedValueEmitter<TypeSystemEntityOrUnknown>
{
private static int s_emitCount = 0;
public MIbcGroup(string name, TypeSystemMetadataEmitter emitter)
{
_buffer = new BlobBuilder();
_il = new InstructionEncoder(_buffer);
_name = name;
_emitter = emitter;
}
private BlobBuilder _buffer;
private InstructionEncoder _il;
private string _name;
private TypeSystemMetadataEmitter _emitter;
public void AddProcessedMethodData(MethodProfileData processedMethodData)
{
MethodDesc method = processedMethodData.Method;
// Format is
// ldtoken method
// variable amount of extra metadata about the method; extension data is encoded via ldstr "id"
// pop
// Extensions generated by this emitter:
//
// ldstr "ExclusiveWeight"
// Any ldc.i4 or ldc.r4 or ldc.r8 instruction to indicate the exclusive weight
//
// ldstr "WeightedCallData"
// ldc.i4 <Count of methods called>
// Repeat <Count of methods called> times
// ldtoken <Method called from this method>
// ldc.i4 <Weight associated with calling the <Method called from this method>>
//
// ldstr "InstrumentationDataStart"
// Encoded ints and longs, using ldc.i4, and ldc.i8 instructions as well as ldtoken <type> instructions
// ldstr "InstrumentationDataEnd" as a terminator
try
{
EntityHandle methodHandle = _emitter.GetMethodRef(method);
_il.OpCode(ILOpCode.Ldtoken);
_il.Token(methodHandle);
if (processedMethodData.ExclusiveWeight != 0)
{
_il.LoadString(_emitter.GetUserStringHandle("ExclusiveWeight"));
if (((double)(int)processedMethodData.ExclusiveWeight) == processedMethodData.ExclusiveWeight)
_il.LoadConstantI4((int)processedMethodData.ExclusiveWeight);
else
_il.LoadConstantR8(processedMethodData.ExclusiveWeight);
}
if ((processedMethodData.CallWeights != null) && processedMethodData.CallWeights.Count > 0)
{
_il.LoadString(_emitter.GetUserStringHandle("WeightedCallData"));
_il.LoadConstantI4(processedMethodData.CallWeights.Count);
foreach (var entry in processedMethodData.CallWeights)
{
EntityHandle calledMethod = _emitter.GetMethodRef(entry.Key);
_il.OpCode(ILOpCode.Ldtoken);
_il.Token(calledMethod);
_il.LoadConstantI4(entry.Value);
}
}
if (processedMethodData.SchemaData != null)
{
_il.LoadString(_emitter.GetUserStringHandle("InstrumentationDataStart"));
PgoProcessor.EncodePgoData<TypeSystemEntityOrUnknown>(processedMethodData.SchemaData, this, true);
}
_il.OpCode(ILOpCode.Pop);
}
catch (Exception ex)
{
Program.PrintWarning($"Exception {ex} while attempting to generate method lists");
}
}
public MethodDefinitionHandle EmitMethod()
{
s_emitCount++;
string basicName = "Assemblies_" + _name;
if (_name.Length > 200)
basicName = basicName.Substring(0, 200); // Cap length of name at 200, which is reasonably small.
string methodName = basicName + "_" + s_emitCount.ToString(CultureInfo.InvariantCulture);
return _emitter.AddGlobalMethod(methodName, _il, 8);
}
bool IPgoEncodedValueEmitter<TypeSystemEntityOrUnknown>.EmitDone()
{
_il.LoadString(_emitter.GetUserStringHandle("InstrumentationDataEnd"));
return true;
}
void IPgoEncodedValueEmitter<TypeSystemEntityOrUnknown>.EmitLong(long value, long previousValue)
{
if ((value <= int.MaxValue) && (value >= int.MinValue))
{
_il.LoadConstantI4(checked((int)value));
}
else
{
_il.LoadConstantI8(value);
}
}
void IPgoEncodedValueEmitter<TypeSystemEntityOrUnknown>.EmitType(TypeSystemEntityOrUnknown type, TypeSystemEntityOrUnknown previousValue)
{
if (type.AsType != null)
{
_il.OpCode(ILOpCode.Ldtoken);
_il.Token(_emitter.GetTypeRef(type.AsType));
}
else
{
_il.LoadConstantI4(type.AsUnknown & 0x00FFFFFF);
}
}
}
private static string GetTypeDefiningAssembly(TypeDesc type)
{
return ((MetadataType)type).Module.Assembly.GetName().Name;
}
private static void AddAssembliesAssociatedWithType(TypeDesc type, HashSet<string> assemblies, out string definingAssembly)
{
definingAssembly = GetTypeDefiningAssembly(type);
assemblies.Add(definingAssembly);
AddAssembliesAssociatedWithType(type, assemblies);
}
private static void AddAssembliesAssociatedWithType(TypeDesc type, HashSet<string> assemblies)
{
if (type.IsPrimitive)
return;
if (type.Context.IsCanonicalDefinitionType(type, CanonicalFormKind.Any))
return;
if (type.IsParameterizedType)
{
AddAssembliesAssociatedWithType(type.GetParameterType(), assemblies);
}
else
{
assemblies.Add(GetTypeDefiningAssembly(type));
foreach (var instantiationType in type.Instantiation)
{
AddAssembliesAssociatedWithType(instantiationType, assemblies);
}
}
}
private static void AddAssembliesAssociatedWithMethod(MethodDesc method, HashSet<string> assemblies, out string definingAssembly)
{
AddAssembliesAssociatedWithType(method.OwningType, assemblies, out definingAssembly);
foreach (var instantiationType in method.Instantiation)
{
AddAssembliesAssociatedWithType(instantiationType, assemblies);
}
}
public static int GenerateMibcFile(TypeSystemContext tsc, FileInfo outputFileName, IEnumerable<MethodProfileData> methodsToAttemptToPlaceIntoProfileData, bool validate, bool uncompressed)
{
TypeSystemMetadataEmitter emitter = new TypeSystemMetadataEmitter(new AssemblyName(outputFileName.Name), tsc);
SortedDictionary<string, MIbcGroup> groups = new SortedDictionary<string, MIbcGroup>();
StringBuilder mibcGroupNameBuilder = new StringBuilder();
HashSet<string> assembliesAssociatedWithMethod = new HashSet<string>();
foreach (var entry in methodsToAttemptToPlaceIntoProfileData)
{
MethodDesc method = entry.Method;
assembliesAssociatedWithMethod.Clear();
AddAssembliesAssociatedWithMethod(method, assembliesAssociatedWithMethod, out string definingAssembly);
string[] assemblyNames = new string[assembliesAssociatedWithMethod.Count];
int i = 1;
assemblyNames[0] = definingAssembly;
foreach (string s in assembliesAssociatedWithMethod)
{
if (s.Equals(definingAssembly))
continue;
assemblyNames[i++] = s;
}
// Always keep the defining assembly as the first name
Array.Sort(assemblyNames, 1, assemblyNames.Length - 1);
mibcGroupNameBuilder.Clear();
foreach (string s in assemblyNames)
{
mibcGroupNameBuilder.Append(s);
mibcGroupNameBuilder.Append(';');
}
string mibcGroupName = mibcGroupNameBuilder.ToString();
if (!groups.TryGetValue(mibcGroupName, out MIbcGroup mibcGroup))
{
mibcGroup = new MIbcGroup(mibcGroupName, emitter);
groups.Add(mibcGroupName, mibcGroup);
}
mibcGroup.AddProcessedMethodData(entry);
}
var buffer = new BlobBuilder();
var il = new InstructionEncoder(buffer);
foreach (var entry in groups)
{
il.LoadString(emitter.GetUserStringHandle(entry.Key));
il.OpCode(ILOpCode.Ldtoken);
il.Token(entry.Value.EmitMethod());
il.OpCode(ILOpCode.Pop);
}
emitter.AddGlobalMethod("AssemblyDictionary", il, 8);
MemoryStream peFile = new MemoryStream();
emitter.SerializeToStream(peFile);
peFile.Position = 0;
if (outputFileName.Exists)
{
outputFileName.Delete();
}
if (uncompressed)
{
using (FileStream file = new FileStream(outputFileName.FullName, FileMode.Create))
{
peFile.CopyTo(file);
}
}
else
{
using (ZipArchive file = ZipFile.Open(outputFileName.FullName, ZipArchiveMode.Create))
{
var entry = file.CreateEntry(outputFileName.Name + ".dll", CompressionLevel.Optimal);
using (Stream archiveStream = entry.Open())
{
peFile.CopyTo(archiveStream);
}
}
}
Program.PrintMessage($"Generated {outputFileName.FullName}");
if (validate)
return ValidateMIbcData(tsc, outputFileName, peFile.ToArray(), methodsToAttemptToPlaceIntoProfileData);
else
return 0;
}
static int ValidateMIbcData(TypeSystemContext tsc, FileInfo outputFileName, byte[] moduleBytes, IEnumerable<MethodProfileData> methodsToAttemptToPrepare)
{
var peReader = new System.Reflection.PortableExecutable.PEReader(System.Collections.Immutable.ImmutableArray.Create<byte>(moduleBytes));
var profileData = MIbcProfileParser.ParseMIbcFile(tsc, peReader, null, null);
Dictionary<MethodDesc, MethodProfileData> mibcDict = new Dictionary<MethodDesc, MethodProfileData>();
foreach (var mibcData in profileData.GetAllMethodProfileData())
{
mibcDict.Add((MethodDesc)(object)mibcData.Method, mibcData);
}
bool failure = false;
if (methodsToAttemptToPrepare.Count() != mibcDict.Count)
{
Program.PrintError($"Not same count of methods {methodsToAttemptToPrepare.Count()} != {mibcDict.Count}");
failure = true;
}
foreach (var entry in methodsToAttemptToPrepare)
{
MethodDesc method = entry.Method;
if (!mibcDict.ContainsKey(method))
{
Program.PrintError($"{method} not found in mibcEntryData");
failure = true;
continue;
}
}
if (failure)
{
return -1;
}
else
{
Program.PrintMessage($"Validated {outputFileName.FullName}");
return 0;
}
}
}
}
| // Licensed to the .NET Foundation under one or more agreements.
// The .NET Foundation licenses this file to you under the MIT license.
using Internal.TypeSystem;
using Internal.TypeSystem.Ecma;
using Internal.IL;
using Microsoft.Diagnostics.Tracing;
using Microsoft.Diagnostics.Tracing.Etlx;
using Microsoft.Diagnostics.Tracing.Parsers.Clr;
using System;
using System.Buffers.Binary;
using System.Collections.Generic;
using System.IO;
using System.Reflection;
using System.Text;
using System.Linq;
using System.Diagnostics;
using System.Globalization;
using System.Threading.Tasks;
using System.Reflection.Metadata;
using System.Reflection.Metadata.Ecma335;
using System.IO.Compression;
using Microsoft.Diagnostics.Tracing.Parsers.Kernel;
using System.Diagnostics.CodeAnalysis;
using ILCompiler.Reflection.ReadyToRun;
using Microsoft.Diagnostics.Tools.Pgo;
using Internal.Pgo;
using ILCompiler.IBC;
using ILCompiler;
namespace Microsoft.Diagnostics.Tools.Pgo
{
static class MibcEmitter
{
class MIbcGroup : IPgoEncodedValueEmitter<TypeSystemEntityOrUnknown>
{
private static int s_emitCount = 0;
public MIbcGroup(string name, TypeSystemMetadataEmitter emitter)
{
_buffer = new BlobBuilder();
_il = new InstructionEncoder(_buffer);
_name = name;
_emitter = emitter;
}
private BlobBuilder _buffer;
private InstructionEncoder _il;
private string _name;
private TypeSystemMetadataEmitter _emitter;
public void AddProcessedMethodData(MethodProfileData processedMethodData)
{
MethodDesc method = processedMethodData.Method;
// Format is
// ldtoken method
// variable amount of extra metadata about the method; extension data is encoded via ldstr "id"
// pop
// Extensions generated by this emitter:
//
// ldstr "ExclusiveWeight"
// Any ldc.i4 or ldc.r4 or ldc.r8 instruction to indicate the exclusive weight
//
// ldstr "WeightedCallData"
// ldc.i4 <Count of methods called>
// Repeat <Count of methods called> times
// ldtoken <Method called from this method>
// ldc.i4 <Weight associated with calling the <Method called from this method>>
//
// ldstr "InstrumentationDataStart"
// Encoded ints and longs, using ldc.i4, and ldc.i8 instructions as well as ldtoken <type> instructions
// ldstr "InstrumentationDataEnd" as a terminator
try
{
EntityHandle methodHandle = _emitter.GetMethodRef(method);
_il.OpCode(ILOpCode.Ldtoken);
_il.Token(methodHandle);
if (processedMethodData.ExclusiveWeight != 0)
{
_il.LoadString(_emitter.GetUserStringHandle("ExclusiveWeight"));
if (((double)(int)processedMethodData.ExclusiveWeight) == processedMethodData.ExclusiveWeight)
_il.LoadConstantI4((int)processedMethodData.ExclusiveWeight);
else
_il.LoadConstantR8(processedMethodData.ExclusiveWeight);
}
if ((processedMethodData.CallWeights != null) && processedMethodData.CallWeights.Count > 0)
{
_il.LoadString(_emitter.GetUserStringHandle("WeightedCallData"));
_il.LoadConstantI4(processedMethodData.CallWeights.Count);
foreach (var entry in processedMethodData.CallWeights)
{
EntityHandle calledMethod = _emitter.GetMethodRef(entry.Key);
_il.OpCode(ILOpCode.Ldtoken);
_il.Token(calledMethod);
_il.LoadConstantI4(entry.Value);
}
}
if (processedMethodData.SchemaData != null)
{
_il.LoadString(_emitter.GetUserStringHandle("InstrumentationDataStart"));
PgoProcessor.EncodePgoData<TypeSystemEntityOrUnknown>(processedMethodData.SchemaData, this, true);
}
_il.OpCode(ILOpCode.Pop);
}
catch (Exception ex)
{
Program.PrintWarning($"Exception {ex} while attempting to generate method lists");
}
}
public MethodDefinitionHandle EmitMethod()
{
s_emitCount++;
string basicName = "Assemblies_" + _name;
if (_name.Length > 200)
basicName = basicName.Substring(0, 200); // Cap length of name at 200, which is reasonably small.
string methodName = basicName + "_" + s_emitCount.ToString(CultureInfo.InvariantCulture);
return _emitter.AddGlobalMethod(methodName, _il, 8);
}
bool IPgoEncodedValueEmitter<TypeSystemEntityOrUnknown>.EmitDone()
{
_il.LoadString(_emitter.GetUserStringHandle("InstrumentationDataEnd"));
return true;
}
void IPgoEncodedValueEmitter<TypeSystemEntityOrUnknown>.EmitLong(long value, long previousValue)
{
if ((value <= int.MaxValue) && (value >= int.MinValue))
{
_il.LoadConstantI4(checked((int)value));
}
else
{
_il.LoadConstantI8(value);
}
}
void IPgoEncodedValueEmitter<TypeSystemEntityOrUnknown>.EmitType(TypeSystemEntityOrUnknown type, TypeSystemEntityOrUnknown previousValue)
{
if (type.AsType != null)
{
_il.OpCode(ILOpCode.Ldtoken);
_il.Token(_emitter.GetTypeRef(type.AsType));
}
else
{
_il.LoadConstantI4(type.AsUnknown & 0x00FFFFFF);
}
}
}
private static string GetTypeDefiningAssembly(TypeDesc type)
{
return ((MetadataType)type).Module.Assembly.GetName().Name;
}
private static void AddAssembliesAssociatedWithType(TypeDesc type, HashSet<string> assemblies, out string definingAssembly)
{
definingAssembly = GetTypeDefiningAssembly(type);
assemblies.Add(definingAssembly);
AddAssembliesAssociatedWithType(type, assemblies);
}
private static void AddAssembliesAssociatedWithType(TypeDesc type, HashSet<string> assemblies)
{
if (type.IsPrimitive)
return;
if (type.Context.IsCanonicalDefinitionType(type, CanonicalFormKind.Any))
return;
if (type.IsParameterizedType)
{
AddAssembliesAssociatedWithType(type.GetParameterType(), assemblies);
}
else
{
assemblies.Add(GetTypeDefiningAssembly(type));
foreach (var instantiationType in type.Instantiation)
{
AddAssembliesAssociatedWithType(instantiationType, assemblies);
}
}
}
private static void AddAssembliesAssociatedWithMethod(MethodDesc method, HashSet<string> assemblies, out string definingAssembly)
{
AddAssembliesAssociatedWithType(method.OwningType, assemblies, out definingAssembly);
foreach (var instantiationType in method.Instantiation)
{
AddAssembliesAssociatedWithType(instantiationType, assemblies);
}
}
public static int GenerateMibcFile(TypeSystemContext tsc, FileInfo outputFileName, IEnumerable<MethodProfileData> methodsToAttemptToPlaceIntoProfileData, bool validate, bool uncompressed)
{
TypeSystemMetadataEmitter emitter = new TypeSystemMetadataEmitter(new AssemblyName(outputFileName.Name), tsc);
SortedDictionary<string, MIbcGroup> groups = new SortedDictionary<string, MIbcGroup>();
StringBuilder mibcGroupNameBuilder = new StringBuilder();
HashSet<string> assembliesAssociatedWithMethod = new HashSet<string>();
foreach (var entry in methodsToAttemptToPlaceIntoProfileData)
{
MethodDesc method = entry.Method;
assembliesAssociatedWithMethod.Clear();
AddAssembliesAssociatedWithMethod(method, assembliesAssociatedWithMethod, out string definingAssembly);
string[] assemblyNames = new string[assembliesAssociatedWithMethod.Count];
int i = 1;
assemblyNames[0] = definingAssembly;
foreach (string s in assembliesAssociatedWithMethod)
{
if (s.Equals(definingAssembly))
continue;
assemblyNames[i++] = s;
}
// Always keep the defining assembly as the first name
Array.Sort(assemblyNames, 1, assemblyNames.Length - 1);
mibcGroupNameBuilder.Clear();
foreach (string s in assemblyNames)
{
mibcGroupNameBuilder.Append(s);
mibcGroupNameBuilder.Append(';');
}
string mibcGroupName = mibcGroupNameBuilder.ToString();
if (!groups.TryGetValue(mibcGroupName, out MIbcGroup mibcGroup))
{
mibcGroup = new MIbcGroup(mibcGroupName, emitter);
groups.Add(mibcGroupName, mibcGroup);
}
mibcGroup.AddProcessedMethodData(entry);
}
var buffer = new BlobBuilder();
var il = new InstructionEncoder(buffer);
foreach (var entry in groups)
{
il.LoadString(emitter.GetUserStringHandle(entry.Key));
il.OpCode(ILOpCode.Ldtoken);
il.Token(entry.Value.EmitMethod());
il.OpCode(ILOpCode.Pop);
}
emitter.AddGlobalMethod("AssemblyDictionary", il, 8);
MemoryStream peFile = new MemoryStream();
emitter.SerializeToStream(peFile);
peFile.Position = 0;
if (outputFileName.Exists)
{
outputFileName.Delete();
}
if (uncompressed)
{
using (FileStream file = new FileStream(outputFileName.FullName, FileMode.Create))
{
peFile.CopyTo(file);
}
}
else
{
using (ZipArchive file = ZipFile.Open(outputFileName.FullName, ZipArchiveMode.Create))
{
var entry = file.CreateEntry(outputFileName.Name + ".dll", CompressionLevel.Optimal);
using (Stream archiveStream = entry.Open())
{
peFile.CopyTo(archiveStream);
}
}
}
Program.PrintMessage($"Generated {outputFileName.FullName}");
if (validate)
return ValidateMIbcData(tsc, outputFileName, peFile.ToArray(), methodsToAttemptToPlaceIntoProfileData);
else
return 0;
}
static int ValidateMIbcData(TypeSystemContext tsc, FileInfo outputFileName, byte[] moduleBytes, IEnumerable<MethodProfileData> methodsToAttemptToPrepare)
{
var peReader = new System.Reflection.PortableExecutable.PEReader(System.Collections.Immutable.ImmutableArray.Create<byte>(moduleBytes));
var profileData = MIbcProfileParser.ParseMIbcFile(tsc, peReader, null, null);
Dictionary<MethodDesc, MethodProfileData> mibcDict = new Dictionary<MethodDesc, MethodProfileData>();
foreach (var mibcData in profileData.GetAllMethodProfileData())
{
mibcDict.Add((MethodDesc)(object)mibcData.Method, mibcData);
}
bool failure = false;
if (methodsToAttemptToPrepare.Count() != mibcDict.Count)
{
Program.PrintError($"Not same count of methods {methodsToAttemptToPrepare.Count()} != {mibcDict.Count}");
failure = true;
}
foreach (var entry in methodsToAttemptToPrepare)
{
MethodDesc method = entry.Method;
if (!mibcDict.ContainsKey(method))
{
Program.PrintError($"{method} not found in mibcEntryData");
failure = true;
continue;
}
}
if (failure)
{
return -1;
}
else
{
Program.PrintMessage($"Validated {outputFileName.FullName}");
return 0;
}
}
}
}
| -1 |
dotnet/runtime | 66,248 | Fix issues related to JsonSerializerOptions mutation and caching. | Second attempt at merging https://github.com/dotnet/runtime/pull/65863. Original PR introduced test failures in netfx and was reverted in https://github.com/dotnet/runtime/pull/66235.
cc @jkotas | eiriktsarpalis | 2022-03-05T19:59:48Z | 2022-03-07T19:44:14Z | 44ec3c9474b3a93eb4f71791a43d8bb5c0b08a58 | 8b4eaf94140f488743d0c78caa3afec3b9c5d789 | Fix issues related to JsonSerializerOptions mutation and caching.. Second attempt at merging https://github.com/dotnet/runtime/pull/65863. Original PR introduced test failures in netfx and was reverted in https://github.com/dotnet/runtime/pull/66235.
cc @jkotas | ./src/tests/JIT/Methodical/explicit/basic/refarg_i2_d.csproj | <Project Sdk="Microsoft.NET.Sdk">
<PropertyGroup>
<OutputType>Exe</OutputType>
<CLRTestPriority>1</CLRTestPriority>
</PropertyGroup>
<PropertyGroup>
<DebugType>Full</DebugType>
<Optimize>False</Optimize>
</PropertyGroup>
<ItemGroup>
<Compile Include="refarg_i2.cs" />
</ItemGroup>
</Project>
| <Project Sdk="Microsoft.NET.Sdk">
<PropertyGroup>
<OutputType>Exe</OutputType>
<CLRTestPriority>1</CLRTestPriority>
</PropertyGroup>
<PropertyGroup>
<DebugType>Full</DebugType>
<Optimize>False</Optimize>
</PropertyGroup>
<ItemGroup>
<Compile Include="refarg_i2.cs" />
</ItemGroup>
</Project>
| -1 |
dotnet/runtime | 66,248 | Fix issues related to JsonSerializerOptions mutation and caching. | Second attempt at merging https://github.com/dotnet/runtime/pull/65863. Original PR introduced test failures in netfx and was reverted in https://github.com/dotnet/runtime/pull/66235.
cc @jkotas | eiriktsarpalis | 2022-03-05T19:59:48Z | 2022-03-07T19:44:14Z | 44ec3c9474b3a93eb4f71791a43d8bb5c0b08a58 | 8b4eaf94140f488743d0c78caa3afec3b9c5d789 | Fix issues related to JsonSerializerOptions mutation and caching.. Second attempt at merging https://github.com/dotnet/runtime/pull/65863. Original PR introduced test failures in netfx and was reverted in https://github.com/dotnet/runtime/pull/66235.
cc @jkotas | ./src/tests/JIT/HardwareIntrinsics/General/Vector256/Xor.Int16.cs | // Licensed to the .NET Foundation under one or more agreements.
// The .NET Foundation licenses this file to you under the MIT license.
/******************************************************************************
* This file is auto-generated from a template file by the GenerateTests.csx *
* script in tests\src\JIT\HardwareIntrinsics\X86\Shared. In order to make *
* changes, please update the corresponding template and run according to the *
* directions listed in the file. *
******************************************************************************/
using System;
using System.Runtime.CompilerServices;
using System.Runtime.InteropServices;
using System.Runtime.Intrinsics;
namespace JIT.HardwareIntrinsics.General
{
public static partial class Program
{
private static void XorInt16()
{
var test = new VectorBinaryOpTest__XorInt16();
// Validates basic functionality works, using Unsafe.Read
test.RunBasicScenario_UnsafeRead();
// Validates calling via reflection works, using Unsafe.Read
test.RunReflectionScenario_UnsafeRead();
// Validates passing a static member works
test.RunClsVarScenario();
// Validates passing a local works, using Unsafe.Read
test.RunLclVarScenario_UnsafeRead();
// Validates passing the field of a local class works
test.RunClassLclFldScenario();
// Validates passing an instance member of a class works
test.RunClassFldScenario();
// Validates passing the field of a local struct works
test.RunStructLclFldScenario();
// Validates passing an instance member of a struct works
test.RunStructFldScenario();
if (!test.Succeeded)
{
throw new Exception("One or more scenarios did not complete as expected.");
}
}
}
public sealed unsafe class VectorBinaryOpTest__XorInt16
{
private struct DataTable
{
private byte[] inArray1;
private byte[] inArray2;
private byte[] outArray;
private GCHandle inHandle1;
private GCHandle inHandle2;
private GCHandle outHandle;
private ulong alignment;
public DataTable(Int16[] inArray1, Int16[] inArray2, Int16[] outArray, int alignment)
{
int sizeOfinArray1 = inArray1.Length * Unsafe.SizeOf<Int16>();
int sizeOfinArray2 = inArray2.Length * Unsafe.SizeOf<Int16>();
int sizeOfoutArray = outArray.Length * Unsafe.SizeOf<Int16>();
if ((alignment != 32 && alignment != 16 && alignment != 8) || (alignment * 2) < sizeOfinArray1 || (alignment * 2) < sizeOfinArray2 || (alignment * 2) < sizeOfoutArray)
{
throw new ArgumentException("Invalid value of alignment");
}
this.inArray1 = new byte[alignment * 2];
this.inArray2 = new byte[alignment * 2];
this.outArray = new byte[alignment * 2];
this.inHandle1 = GCHandle.Alloc(this.inArray1, GCHandleType.Pinned);
this.inHandle2 = GCHandle.Alloc(this.inArray2, GCHandleType.Pinned);
this.outHandle = GCHandle.Alloc(this.outArray, GCHandleType.Pinned);
this.alignment = (ulong)alignment;
Unsafe.CopyBlockUnaligned(ref Unsafe.AsRef<byte>(inArray1Ptr), ref Unsafe.As<Int16, byte>(ref inArray1[0]), (uint)sizeOfinArray1);
Unsafe.CopyBlockUnaligned(ref Unsafe.AsRef<byte>(inArray2Ptr), ref Unsafe.As<Int16, byte>(ref inArray2[0]), (uint)sizeOfinArray2);
}
public void* inArray1Ptr => Align((byte*)(inHandle1.AddrOfPinnedObject().ToPointer()), alignment);
public void* inArray2Ptr => Align((byte*)(inHandle2.AddrOfPinnedObject().ToPointer()), alignment);
public void* outArrayPtr => Align((byte*)(outHandle.AddrOfPinnedObject().ToPointer()), alignment);
public void Dispose()
{
inHandle1.Free();
inHandle2.Free();
outHandle.Free();
}
private static unsafe void* Align(byte* buffer, ulong expectedAlignment)
{
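// Worked example (illustrative only; the addresses are assumed, not from the original source):
// with buffer = 0x1013 and expectedAlignment = 32 (0x20), (0x1013 + 0x1F) & ~0x1F = 0x1020,
// i.e. the next 32-byte boundary at or above the start of the buffer.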
return (void*)(((ulong)buffer + expectedAlignment - 1) & ~(expectedAlignment - 1));
}
}
private struct TestStruct
{
public Vector256<Int16> _fld1;
public Vector256<Int16> _fld2;
public static TestStruct Create()
{
var testStruct = new TestStruct();
for (var i = 0; i < Op1ElementCount; i++) { _data1[i] = TestLibrary.Generator.GetInt16(); }
Unsafe.CopyBlockUnaligned(ref Unsafe.As<Vector256<Int16>, byte>(ref testStruct._fld1), ref Unsafe.As<Int16, byte>(ref _data1[0]), (uint)Unsafe.SizeOf<Vector256<Int16>>());
for (var i = 0; i < Op2ElementCount; i++) { _data2[i] = TestLibrary.Generator.GetInt16(); }
Unsafe.CopyBlockUnaligned(ref Unsafe.As<Vector256<Int16>, byte>(ref testStruct._fld2), ref Unsafe.As<Int16, byte>(ref _data2[0]), (uint)Unsafe.SizeOf<Vector256<Int16>>());
return testStruct;
}
public void RunStructFldScenario(VectorBinaryOpTest__XorInt16 testClass)
{
var result = Vector256.Xor(_fld1, _fld2);
Unsafe.Write(testClass._dataTable.outArrayPtr, result);
testClass.ValidateResult(_fld1, _fld2, testClass._dataTable.outArrayPtr);
}
}
private static readonly int LargestVectorSize = 32;
private static readonly int Op1ElementCount = Unsafe.SizeOf<Vector256<Int16>>() / sizeof(Int16);
private static readonly int Op2ElementCount = Unsafe.SizeOf<Vector256<Int16>>() / sizeof(Int16);
private static readonly int RetElementCount = Unsafe.SizeOf<Vector256<Int16>>() / sizeof(Int16);
private static Int16[] _data1 = new Int16[Op1ElementCount];
private static Int16[] _data2 = new Int16[Op2ElementCount];
private static Vector256<Int16> _clsVar1;
private static Vector256<Int16> _clsVar2;
private Vector256<Int16> _fld1;
private Vector256<Int16> _fld2;
private DataTable _dataTable;
static VectorBinaryOpTest__XorInt16()
{
for (var i = 0; i < Op1ElementCount; i++) { _data1[i] = TestLibrary.Generator.GetInt16(); }
Unsafe.CopyBlockUnaligned(ref Unsafe.As<Vector256<Int16>, byte>(ref _clsVar1), ref Unsafe.As<Int16, byte>(ref _data1[0]), (uint)Unsafe.SizeOf<Vector256<Int16>>());
for (var i = 0; i < Op2ElementCount; i++) { _data2[i] = TestLibrary.Generator.GetInt16(); }
Unsafe.CopyBlockUnaligned(ref Unsafe.As<Vector256<Int16>, byte>(ref _clsVar2), ref Unsafe.As<Int16, byte>(ref _data2[0]), (uint)Unsafe.SizeOf<Vector256<Int16>>());
}
public VectorBinaryOpTest__XorInt16()
{
Succeeded = true;
for (var i = 0; i < Op1ElementCount; i++) { _data1[i] = TestLibrary.Generator.GetInt16(); }
Unsafe.CopyBlockUnaligned(ref Unsafe.As<Vector256<Int16>, byte>(ref _fld1), ref Unsafe.As<Int16, byte>(ref _data1[0]), (uint)Unsafe.SizeOf<Vector256<Int16>>());
for (var i = 0; i < Op2ElementCount; i++) { _data2[i] = TestLibrary.Generator.GetInt16(); }
Unsafe.CopyBlockUnaligned(ref Unsafe.As<Vector256<Int16>, byte>(ref _fld2), ref Unsafe.As<Int16, byte>(ref _data2[0]), (uint)Unsafe.SizeOf<Vector256<Int16>>());
for (var i = 0; i < Op1ElementCount; i++) { _data1[i] = TestLibrary.Generator.GetInt16(); }
for (var i = 0; i < Op2ElementCount; i++) { _data2[i] = TestLibrary.Generator.GetInt16(); }
_dataTable = new DataTable(_data1, _data2, new Int16[RetElementCount], LargestVectorSize);
}
public bool Succeeded { get; set; }
public void RunBasicScenario_UnsafeRead()
{
TestLibrary.TestFramework.BeginScenario(nameof(RunBasicScenario_UnsafeRead));
var result = Vector256.Xor(
Unsafe.Read<Vector256<Int16>>(_dataTable.inArray1Ptr),
Unsafe.Read<Vector256<Int16>>(_dataTable.inArray2Ptr)
);
Unsafe.Write(_dataTable.outArrayPtr, result);
ValidateResult(_dataTable.inArray1Ptr, _dataTable.inArray2Ptr, _dataTable.outArrayPtr);
}
public void RunReflectionScenario_UnsafeRead()
{
TestLibrary.TestFramework.BeginScenario(nameof(RunReflectionScenario_UnsafeRead));
var method = typeof(Vector256).GetMethod(nameof(Vector256.Xor), new Type[] {
typeof(Vector256<Int16>),
typeof(Vector256<Int16>)
});
if (method is null)
{
method = typeof(Vector256).GetMethod(nameof(Vector256.Xor), 1, new Type[] {
typeof(Vector256<>).MakeGenericType(Type.MakeGenericMethodParameter(0)),
typeof(Vector256<>).MakeGenericType(Type.MakeGenericMethodParameter(0))
});
}
if (method.IsGenericMethodDefinition)
{
method = method.MakeGenericMethod(typeof(Int16));
}
var result = method.Invoke(null, new object[] {
Unsafe.Read<Vector256<Int16>>(_dataTable.inArray1Ptr),
Unsafe.Read<Vector256<Int16>>(_dataTable.inArray2Ptr)
});
Unsafe.Write(_dataTable.outArrayPtr, (Vector256<Int16>)(result));
ValidateResult(_dataTable.inArray1Ptr, _dataTable.inArray2Ptr, _dataTable.outArrayPtr);
}
public void RunClsVarScenario()
{
TestLibrary.TestFramework.BeginScenario(nameof(RunClsVarScenario));
var result = Vector256.Xor(
_clsVar1,
_clsVar2
);
Unsafe.Write(_dataTable.outArrayPtr, result);
ValidateResult(_clsVar1, _clsVar2, _dataTable.outArrayPtr);
}
public void RunLclVarScenario_UnsafeRead()
{
TestLibrary.TestFramework.BeginScenario(nameof(RunLclVarScenario_UnsafeRead));
var op1 = Unsafe.Read<Vector256<Int16>>(_dataTable.inArray1Ptr);
var op2 = Unsafe.Read<Vector256<Int16>>(_dataTable.inArray2Ptr);
var result = Vector256.Xor(op1, op2);
Unsafe.Write(_dataTable.outArrayPtr, result);
ValidateResult(op1, op2, _dataTable.outArrayPtr);
}
public void RunClassLclFldScenario()
{
TestLibrary.TestFramework.BeginScenario(nameof(RunClassLclFldScenario));
var test = new VectorBinaryOpTest__XorInt16();
var result = Vector256.Xor(test._fld1, test._fld2);
Unsafe.Write(_dataTable.outArrayPtr, result);
ValidateResult(test._fld1, test._fld2, _dataTable.outArrayPtr);
}
public void RunClassFldScenario()
{
TestLibrary.TestFramework.BeginScenario(nameof(RunClassFldScenario));
var result = Vector256.Xor(_fld1, _fld2);
Unsafe.Write(_dataTable.outArrayPtr, result);
ValidateResult(_fld1, _fld2, _dataTable.outArrayPtr);
}
public void RunStructLclFldScenario()
{
TestLibrary.TestFramework.BeginScenario(nameof(RunStructLclFldScenario));
var test = TestStruct.Create();
var result = Vector256.Xor(test._fld1, test._fld2);
Unsafe.Write(_dataTable.outArrayPtr, result);
ValidateResult(test._fld1, test._fld2, _dataTable.outArrayPtr);
}
public void RunStructFldScenario()
{
TestLibrary.TestFramework.BeginScenario(nameof(RunStructFldScenario));
var test = TestStruct.Create();
test.RunStructFldScenario(this);
}
private void ValidateResult(Vector256<Int16> op1, Vector256<Int16> op2, void* result, [CallerMemberName] string method = "")
{
Int16[] inArray1 = new Int16[Op1ElementCount];
Int16[] inArray2 = new Int16[Op2ElementCount];
Int16[] outArray = new Int16[RetElementCount];
Unsafe.WriteUnaligned(ref Unsafe.As<Int16, byte>(ref inArray1[0]), op1);
Unsafe.WriteUnaligned(ref Unsafe.As<Int16, byte>(ref inArray2[0]), op2);
Unsafe.CopyBlockUnaligned(ref Unsafe.As<Int16, byte>(ref outArray[0]), ref Unsafe.AsRef<byte>(result), (uint)Unsafe.SizeOf<Vector256<Int16>>());
ValidateResult(inArray1, inArray2, outArray, method);
}
private void ValidateResult(void* op1, void* op2, void* result, [CallerMemberName] string method = "")
{
Int16[] inArray1 = new Int16[Op1ElementCount];
Int16[] inArray2 = new Int16[Op2ElementCount];
Int16[] outArray = new Int16[RetElementCount];
Unsafe.CopyBlockUnaligned(ref Unsafe.As<Int16, byte>(ref inArray1[0]), ref Unsafe.AsRef<byte>(op1), (uint)Unsafe.SizeOf<Vector256<Int16>>());
Unsafe.CopyBlockUnaligned(ref Unsafe.As<Int16, byte>(ref inArray2[0]), ref Unsafe.AsRef<byte>(op2), (uint)Unsafe.SizeOf<Vector256<Int16>>());
Unsafe.CopyBlockUnaligned(ref Unsafe.As<Int16, byte>(ref outArray[0]), ref Unsafe.AsRef<byte>(result), (uint)Unsafe.SizeOf<Vector256<Int16>>());
ValidateResult(inArray1, inArray2, outArray, method);
}
private void ValidateResult(Int16[] left, Int16[] right, Int16[] result, [CallerMemberName] string method = "")
{
bool succeeded = true;
if (result[0] != (short)(left[0] ^ right[0]))
{
succeeded = false;
}
else
{
for (var i = 1; i < RetElementCount; i++)
{
if (result[i] != (short)(left[i] ^ right[i]))
{
succeeded = false;
break;
}
}
}
if (!succeeded)
{
TestLibrary.TestFramework.LogInformation($"{nameof(Vector256)}.{nameof(Vector256.Xor)}<Int16>(Vector256<Int16>, Vector256<Int16>): {method} failed:");
TestLibrary.TestFramework.LogInformation($" left: ({string.Join(", ", left)})");
TestLibrary.TestFramework.LogInformation($" right: ({string.Join(", ", right)})");
TestLibrary.TestFramework.LogInformation($" result: ({string.Join(", ", result)})");
TestLibrary.TestFramework.LogInformation(string.Empty);
Succeeded = false;
}
}
}
}
| // Licensed to the .NET Foundation under one or more agreements.
// The .NET Foundation licenses this file to you under the MIT license.
/******************************************************************************
* This file is auto-generated from a template file by the GenerateTests.csx *
* script in tests\src\JIT\HardwareIntrinsics\X86\Shared. In order to make *
* changes, please update the corresponding template and run according to the *
* directions listed in the file. *
******************************************************************************/
using System;
using System.Runtime.CompilerServices;
using System.Runtime.InteropServices;
using System.Runtime.Intrinsics;
namespace JIT.HardwareIntrinsics.General
{
public static partial class Program
{
private static void XorInt16()
{
var test = new VectorBinaryOpTest__XorInt16();
// Validates basic functionality works, using Unsafe.Read
test.RunBasicScenario_UnsafeRead();
// Validates calling via reflection works, using Unsafe.Read
test.RunReflectionScenario_UnsafeRead();
// Validates passing a static member works
test.RunClsVarScenario();
// Validates passing a local works, using Unsafe.Read
test.RunLclVarScenario_UnsafeRead();
// Validates passing the field of a local class works
test.RunClassLclFldScenario();
// Validates passing an instance member of a class works
test.RunClassFldScenario();
// Validates passing the field of a local struct works
test.RunStructLclFldScenario();
// Validates passing an instance member of a struct works
test.RunStructFldScenario();
if (!test.Succeeded)
{
throw new Exception("One or more scenarios did not complete as expected.");
}
}
}
public sealed unsafe class VectorBinaryOpTest__XorInt16
{
private struct DataTable
{
private byte[] inArray1;
private byte[] inArray2;
private byte[] outArray;
private GCHandle inHandle1;
private GCHandle inHandle2;
private GCHandle outHandle;
private ulong alignment;
public DataTable(Int16[] inArray1, Int16[] inArray2, Int16[] outArray, int alignment)
{
int sizeOfinArray1 = inArray1.Length * Unsafe.SizeOf<Int16>();
int sizeOfinArray2 = inArray2.Length * Unsafe.SizeOf<Int16>();
int sizeOfoutArray = outArray.Length * Unsafe.SizeOf<Int16>();
if ((alignment != 32 && alignment != 16 && alignment != 8) || (alignment * 2) < sizeOfinArray1 || (alignment * 2) < sizeOfinArray2 || (alignment * 2) < sizeOfoutArray)
{
throw new ArgumentException("Invalid value of alignment");
}
this.inArray1 = new byte[alignment * 2];
this.inArray2 = new byte[alignment * 2];
this.outArray = new byte[alignment * 2];
this.inHandle1 = GCHandle.Alloc(this.inArray1, GCHandleType.Pinned);
this.inHandle2 = GCHandle.Alloc(this.inArray2, GCHandleType.Pinned);
this.outHandle = GCHandle.Alloc(this.outArray, GCHandleType.Pinned);
this.alignment = (ulong)alignment;
Unsafe.CopyBlockUnaligned(ref Unsafe.AsRef<byte>(inArray1Ptr), ref Unsafe.As<Int16, byte>(ref inArray1[0]), (uint)sizeOfinArray1);
Unsafe.CopyBlockUnaligned(ref Unsafe.AsRef<byte>(inArray2Ptr), ref Unsafe.As<Int16, byte>(ref inArray2[0]), (uint)sizeOfinArray2);
}
public void* inArray1Ptr => Align((byte*)(inHandle1.AddrOfPinnedObject().ToPointer()), alignment);
public void* inArray2Ptr => Align((byte*)(inHandle2.AddrOfPinnedObject().ToPointer()), alignment);
public void* outArrayPtr => Align((byte*)(outHandle.AddrOfPinnedObject().ToPointer()), alignment);
public void Dispose()
{
inHandle1.Free();
inHandle2.Free();
outHandle.Free();
}
private static unsafe void* Align(byte* buffer, ulong expectedAlignment)
{
return (void*)(((ulong)buffer + expectedAlignment - 1) & ~(expectedAlignment - 1));
}
}
private struct TestStruct
{
public Vector256<Int16> _fld1;
public Vector256<Int16> _fld2;
public static TestStruct Create()
{
var testStruct = new TestStruct();
for (var i = 0; i < Op1ElementCount; i++) { _data1[i] = TestLibrary.Generator.GetInt16(); }
Unsafe.CopyBlockUnaligned(ref Unsafe.As<Vector256<Int16>, byte>(ref testStruct._fld1), ref Unsafe.As<Int16, byte>(ref _data1[0]), (uint)Unsafe.SizeOf<Vector256<Int16>>());
for (var i = 0; i < Op2ElementCount; i++) { _data2[i] = TestLibrary.Generator.GetInt16(); }
Unsafe.CopyBlockUnaligned(ref Unsafe.As<Vector256<Int16>, byte>(ref testStruct._fld2), ref Unsafe.As<Int16, byte>(ref _data2[0]), (uint)Unsafe.SizeOf<Vector256<Int16>>());
return testStruct;
}
public void RunStructFldScenario(VectorBinaryOpTest__XorInt16 testClass)
{
var result = Vector256.Xor(_fld1, _fld2);
Unsafe.Write(testClass._dataTable.outArrayPtr, result);
testClass.ValidateResult(_fld1, _fld2, testClass._dataTable.outArrayPtr);
}
}
private static readonly int LargestVectorSize = 32;
private static readonly int Op1ElementCount = Unsafe.SizeOf<Vector256<Int16>>() / sizeof(Int16);
private static readonly int Op2ElementCount = Unsafe.SizeOf<Vector256<Int16>>() / sizeof(Int16);
private static readonly int RetElementCount = Unsafe.SizeOf<Vector256<Int16>>() / sizeof(Int16);
private static Int16[] _data1 = new Int16[Op1ElementCount];
private static Int16[] _data2 = new Int16[Op2ElementCount];
private static Vector256<Int16> _clsVar1;
private static Vector256<Int16> _clsVar2;
private Vector256<Int16> _fld1;
private Vector256<Int16> _fld2;
private DataTable _dataTable;
static VectorBinaryOpTest__XorInt16()
{
for (var i = 0; i < Op1ElementCount; i++) { _data1[i] = TestLibrary.Generator.GetInt16(); }
Unsafe.CopyBlockUnaligned(ref Unsafe.As<Vector256<Int16>, byte>(ref _clsVar1), ref Unsafe.As<Int16, byte>(ref _data1[0]), (uint)Unsafe.SizeOf<Vector256<Int16>>());
for (var i = 0; i < Op2ElementCount; i++) { _data2[i] = TestLibrary.Generator.GetInt16(); }
Unsafe.CopyBlockUnaligned(ref Unsafe.As<Vector256<Int16>, byte>(ref _clsVar2), ref Unsafe.As<Int16, byte>(ref _data2[0]), (uint)Unsafe.SizeOf<Vector256<Int16>>());
}
public VectorBinaryOpTest__XorInt16()
{
Succeeded = true;
for (var i = 0; i < Op1ElementCount; i++) { _data1[i] = TestLibrary.Generator.GetInt16(); }
Unsafe.CopyBlockUnaligned(ref Unsafe.As<Vector256<Int16>, byte>(ref _fld1), ref Unsafe.As<Int16, byte>(ref _data1[0]), (uint)Unsafe.SizeOf<Vector256<Int16>>());
for (var i = 0; i < Op2ElementCount; i++) { _data2[i] = TestLibrary.Generator.GetInt16(); }
Unsafe.CopyBlockUnaligned(ref Unsafe.As<Vector256<Int16>, byte>(ref _fld2), ref Unsafe.As<Int16, byte>(ref _data2[0]), (uint)Unsafe.SizeOf<Vector256<Int16>>());
for (var i = 0; i < Op1ElementCount; i++) { _data1[i] = TestLibrary.Generator.GetInt16(); }
for (var i = 0; i < Op2ElementCount; i++) { _data2[i] = TestLibrary.Generator.GetInt16(); }
_dataTable = new DataTable(_data1, _data2, new Int16[RetElementCount], LargestVectorSize);
}
public bool Succeeded { get; set; }
public void RunBasicScenario_UnsafeRead()
{
TestLibrary.TestFramework.BeginScenario(nameof(RunBasicScenario_UnsafeRead));
var result = Vector256.Xor(
Unsafe.Read<Vector256<Int16>>(_dataTable.inArray1Ptr),
Unsafe.Read<Vector256<Int16>>(_dataTable.inArray2Ptr)
);
Unsafe.Write(_dataTable.outArrayPtr, result);
ValidateResult(_dataTable.inArray1Ptr, _dataTable.inArray2Ptr, _dataTable.outArrayPtr);
}
public void RunReflectionScenario_UnsafeRead()
{
TestLibrary.TestFramework.BeginScenario(nameof(RunReflectionScenario_UnsafeRead));
var method = typeof(Vector256).GetMethod(nameof(Vector256.Xor), new Type[] {
typeof(Vector256<Int16>),
typeof(Vector256<Int16>)
});
if (method is null)
{
method = typeof(Vector256).GetMethod(nameof(Vector256.Xor), 1, new Type[] {
typeof(Vector256<>).MakeGenericType(Type.MakeGenericMethodParameter(0)),
typeof(Vector256<>).MakeGenericType(Type.MakeGenericMethodParameter(0))
});
}
if (method.IsGenericMethodDefinition)
{
method = method.MakeGenericMethod(typeof(Int16));
}
var result = method.Invoke(null, new object[] {
Unsafe.Read<Vector256<Int16>>(_dataTable.inArray1Ptr),
Unsafe.Read<Vector256<Int16>>(_dataTable.inArray2Ptr)
});
Unsafe.Write(_dataTable.outArrayPtr, (Vector256<Int16>)(result));
ValidateResult(_dataTable.inArray1Ptr, _dataTable.inArray2Ptr, _dataTable.outArrayPtr);
}
public void RunClsVarScenario()
{
TestLibrary.TestFramework.BeginScenario(nameof(RunClsVarScenario));
var result = Vector256.Xor(
_clsVar1,
_clsVar2
);
Unsafe.Write(_dataTable.outArrayPtr, result);
ValidateResult(_clsVar1, _clsVar2, _dataTable.outArrayPtr);
}
public void RunLclVarScenario_UnsafeRead()
{
TestLibrary.TestFramework.BeginScenario(nameof(RunLclVarScenario_UnsafeRead));
var op1 = Unsafe.Read<Vector256<Int16>>(_dataTable.inArray1Ptr);
var op2 = Unsafe.Read<Vector256<Int16>>(_dataTable.inArray2Ptr);
var result = Vector256.Xor(op1, op2);
Unsafe.Write(_dataTable.outArrayPtr, result);
ValidateResult(op1, op2, _dataTable.outArrayPtr);
}
public void RunClassLclFldScenario()
{
TestLibrary.TestFramework.BeginScenario(nameof(RunClassLclFldScenario));
var test = new VectorBinaryOpTest__XorInt16();
var result = Vector256.Xor(test._fld1, test._fld2);
Unsafe.Write(_dataTable.outArrayPtr, result);
ValidateResult(test._fld1, test._fld2, _dataTable.outArrayPtr);
}
public void RunClassFldScenario()
{
TestLibrary.TestFramework.BeginScenario(nameof(RunClassFldScenario));
var result = Vector256.Xor(_fld1, _fld2);
Unsafe.Write(_dataTable.outArrayPtr, result);
ValidateResult(_fld1, _fld2, _dataTable.outArrayPtr);
}
public void RunStructLclFldScenario()
{
TestLibrary.TestFramework.BeginScenario(nameof(RunStructLclFldScenario));
var test = TestStruct.Create();
var result = Vector256.Xor(test._fld1, test._fld2);
Unsafe.Write(_dataTable.outArrayPtr, result);
ValidateResult(test._fld1, test._fld2, _dataTable.outArrayPtr);
}
public void RunStructFldScenario()
{
TestLibrary.TestFramework.BeginScenario(nameof(RunStructFldScenario));
var test = TestStruct.Create();
test.RunStructFldScenario(this);
}
private void ValidateResult(Vector256<Int16> op1, Vector256<Int16> op2, void* result, [CallerMemberName] string method = "")
{
Int16[] inArray1 = new Int16[Op1ElementCount];
Int16[] inArray2 = new Int16[Op2ElementCount];
Int16[] outArray = new Int16[RetElementCount];
Unsafe.WriteUnaligned(ref Unsafe.As<Int16, byte>(ref inArray1[0]), op1);
Unsafe.WriteUnaligned(ref Unsafe.As<Int16, byte>(ref inArray2[0]), op2);
Unsafe.CopyBlockUnaligned(ref Unsafe.As<Int16, byte>(ref outArray[0]), ref Unsafe.AsRef<byte>(result), (uint)Unsafe.SizeOf<Vector256<Int16>>());
ValidateResult(inArray1, inArray2, outArray, method);
}
private void ValidateResult(void* op1, void* op2, void* result, [CallerMemberName] string method = "")
{
Int16[] inArray1 = new Int16[Op1ElementCount];
Int16[] inArray2 = new Int16[Op2ElementCount];
Int16[] outArray = new Int16[RetElementCount];
Unsafe.CopyBlockUnaligned(ref Unsafe.As<Int16, byte>(ref inArray1[0]), ref Unsafe.AsRef<byte>(op1), (uint)Unsafe.SizeOf<Vector256<Int16>>());
Unsafe.CopyBlockUnaligned(ref Unsafe.As<Int16, byte>(ref inArray2[0]), ref Unsafe.AsRef<byte>(op2), (uint)Unsafe.SizeOf<Vector256<Int16>>());
Unsafe.CopyBlockUnaligned(ref Unsafe.As<Int16, byte>(ref outArray[0]), ref Unsafe.AsRef<byte>(result), (uint)Unsafe.SizeOf<Vector256<Int16>>());
ValidateResult(inArray1, inArray2, outArray, method);
}
private void ValidateResult(Int16[] left, Int16[] right, Int16[] result, [CallerMemberName] string method = "")
{
bool succeeded = true;
if (result[0] != (short)(left[0] ^ right[0]))
{
succeeded = false;
}
else
{
for (var i = 1; i < RetElementCount; i++)
{
if (result[i] != (short)(left[i] ^ right[i]))
{
succeeded = false;
break;
}
}
}
if (!succeeded)
{
TestLibrary.TestFramework.LogInformation($"{nameof(Vector256)}.{nameof(Vector256.Xor)}<Int16>(Vector256<Int16>, Vector256<Int16>): {method} failed:");
TestLibrary.TestFramework.LogInformation($" left: ({string.Join(", ", left)})");
TestLibrary.TestFramework.LogInformation($" right: ({string.Join(", ", right)})");
TestLibrary.TestFramework.LogInformation($" result: ({string.Join(", ", result)})");
TestLibrary.TestFramework.LogInformation(string.Empty);
Succeeded = false;
}
}
}
}
| -1 |
dotnet/runtime | 66,248 | Fix issues related to JsonSerializerOptions mutation and caching. | Second attempt at merging https://github.com/dotnet/runtime/pull/65863. Original PR introduced test failures in netfx and was reverted in https://github.com/dotnet/runtime/pull/66235.
cc @jkotas | eiriktsarpalis | 2022-03-05T19:59:48Z | 2022-03-07T19:44:14Z | 44ec3c9474b3a93eb4f71791a43d8bb5c0b08a58 | 8b4eaf94140f488743d0c78caa3afec3b9c5d789 | Fix issues related to JsonSerializerOptions mutation and caching.. Second attempt at merging https://github.com/dotnet/runtime/pull/65863. Original PR introduced test failures in netfx and was reverted in https://github.com/dotnet/runtime/pull/66235.
cc @jkotas | ./src/tests/GC/Features/Finalizer/finalizeother/finalizearraysleep.cs | // Licensed to the .NET Foundation under one or more agreements.
// The .NET Foundation licenses this file to you under the MIT license.
// Tests Sleep in Finalizer for array of objects
using System;
using System.Threading;
using System.Runtime.CompilerServices;
public class Test_finalizearraysleep {
public class Dummy {
public static int count=0;
~Dummy() {
count++;
Thread.Sleep(1000);
}
}
public class CreateObj {
public Dummy[] obj;
public int ExitCode = 0;
// No inline to ensure no stray refs to the new array
[MethodImplAttribute(MethodImplOptions.NoInlining)]
public CreateObj() {
obj = new Dummy[10];
for(int i=0;i<10;i++) {
obj[i] = new Dummy();
}
}
[MethodImplAttribute(MethodImplOptions.NoInlining)]
public void RunTest() {
obj=null; // making sure collect is called even with /debug
GC.Collect();
GC.WaitForPendingFinalizers();
}
}
public static int Main() {
CreateObj temp = new CreateObj();
temp.RunTest();
GC.Collect();
GC.WaitForPendingFinalizers();
GC.Collect();
if (Dummy.count == 10)
{
Console.WriteLine("Test for Finalize() for array of objects passed!");
return 100;
}
else
{
Console.WriteLine("Test for Finalize() for array of objects failed!");
return 0;
}
}
}
| // Licensed to the .NET Foundation under one or more agreements.
// The .NET Foundation licenses this file to you under the MIT license.
// Tests Sleep in Finalizer for array of objects
using System;
using System.Threading;
using System.Runtime.CompilerServices;
public class Test_finalizearraysleep {
public class Dummy {
public static int count=0;
~Dummy() {
count++;
Thread.Sleep(1000);
}
}
public class CreateObj {
public Dummy[] obj;
public int ExitCode = 0;
// No inline to ensure no stray refs to the new array
[MethodImplAttribute(MethodImplOptions.NoInlining)]
public CreateObj() {
obj = new Dummy[10];
for(int i=0;i<10;i++) {
obj[i] = new Dummy();
}
}
[MethodImplAttribute(MethodImplOptions.NoInlining)]
public void RunTest() {
obj=null; // making sure collect is called even with /debug
GC.Collect();
GC.WaitForPendingFinalizers();
}
}
public static int Main() {
CreateObj temp = new CreateObj();
temp.RunTest();
GC.Collect();
GC.WaitForPendingFinalizers();
GC.Collect();
if (Dummy.count == 10)
{
Console.WriteLine("Test for Finalize() for array of objects passed!");
return 100;
}
else
{
Console.WriteLine("Test for Finalize() for array of objects failed!");
return 0;
}
}
}
| -1 |
dotnet/runtime | 66,248 | Fix issues related to JsonSerializerOptions mutation and caching. | Second attempt at merging https://github.com/dotnet/runtime/pull/65863. Original PR introduced test failures in netfx and was reverted in https://github.com/dotnet/runtime/pull/66235.
cc @jkotas | eiriktsarpalis | 2022-03-05T19:59:48Z | 2022-03-07T19:44:14Z | 44ec3c9474b3a93eb4f71791a43d8bb5c0b08a58 | 8b4eaf94140f488743d0c78caa3afec3b9c5d789 | Fix issues related to JsonSerializerOptions mutation and caching.. Second attempt at merging https://github.com/dotnet/runtime/pull/65863. Original PR introduced test failures in netfx and was reverted in https://github.com/dotnet/runtime/pull/66235.
cc @jkotas | ./src/coreclr/nativeaot/Runtime/i386/ThunkPoolThunks.asm | ;; Licensed to the .NET Foundation under one or more agreements.
;; The .NET Foundation licenses this file to you under the MIT license.
.586
.model flat
option casemap:none
.code
include AsmMacros.inc
;; -----------------------------------------------------------------------------------------------------------
;; standard macros
;; -----------------------------------------------------------------------------------------------------------
LEAF_ENTRY macro Name, Section
Section segment para 'CODE'
public Name
Name proc
endm
NAMED_LEAF_ENTRY macro Name, Section, SectionAlias
Section segment para alias(SectionAlias) 'CODE'
public Name
Name proc
endm
LEAF_END macro Name, Section
Name endp
Section ends
endm
NAMED_READONLY_DATA_SECTION macro Section, SectionAlias
Section segment para alias(SectionAlias) read 'DATA'
DD 0
Section ends
endm
NAMED_READWRITE_DATA_SECTION macro Section, SectionAlias
Section segment para alias(SectionAlias) read write 'DATA'
DD 0
Section ends
endm
;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;; STUBS & DATA SECTIONS ;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
THUNK_CODESIZE equ 20h ;; 5-byte call, 1 byte pop, 6-byte lea, 6-byte jmp, 14 bytes of padding
THUNK_DATASIZE equ 08h ;; 2 dwords
THUNK_POOL_NUM_THUNKS_PER_PAGE equ 078h ;; 120 thunks per page
PAGE_SIZE equ 01000h ;; 4K
POINTER_SIZE equ 04h
GET_CURRENT_IP macro
ALIGN 10h ;; make sure we align to 16-byte boundary for CFG table
call @F
@@: pop eax
endm
LOAD_DATA_ADDRESS macro groupIndex, index
;; start : eax points to current instruction of the current thunk
;; set eax to beginning of data page : eax <- [eax - (size of the call instruction + (THUNK_CODESIZE * current thunk's index)) + PAGE_SIZE]
;; fix offset of the data : eax <- eax + (THUNK_DATASIZE * current thunk's index)
lea eax,[eax - (5 + groupIndex * THUNK_CODESIZE * 10 + THUNK_CODESIZE * index) + PAGE_SIZE + (groupIndex * THUNK_DATASIZE * 10 + THUNK_DATASIZE * index)]
endm
JUMP_TO_COMMON macro groupIndex, index
;; start : eax points to current thunk's data block
;; re-point eax to beginning of data page : eax <- [eax - (THUNK_DATASIZE * current thunk's index)]
;; jump to the location pointed at by the last dword in the data page : jump [eax + PAGE_SIZE - POINTER_SIZE]
jmp dword ptr[eax - (groupIndex * THUNK_DATASIZE * 10 + THUNK_DATASIZE * index) + PAGE_SIZE - POINTER_SIZE]
endm
TenThunks macro groupIndex
;; Each thunk will load the address of its corresponding data (from the page that immediately follows)
;; and call a common stub. The address of the common stub is set up by the caller (last dword
;; in the thunks data section) depending on the 'kind' of thunks needed (interop, fat function pointers, etc...)
;; Each data block used by a thunk consists of two dword values:
;; - Context: some value given to the thunk as context (passed in eax). Example for fat-fptrs: context = generic dictionary
;; - Target : target code that the thunk eventually jumps to.
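;; Illustrative layout (a sketch derived from the macros above, not emitted here): for thunk k of
;; group g, LOAD_DATA_ADDRESS leaves eax at dataPageBase + (g * 10 + k) * THUNK_DATASIZE, whose two
;; dwords are [Context][Target], and JUMP_TO_COMMON dispatches through the common-stub pointer held
;; in the last dword of the data page, at dataPageBase + PAGE_SIZE - POINTER_SIZE.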
GET_CURRENT_IP
LOAD_DATA_ADDRESS groupIndex,0
JUMP_TO_COMMON groupIndex,0
GET_CURRENT_IP
LOAD_DATA_ADDRESS groupIndex,1
JUMP_TO_COMMON groupIndex,1
GET_CURRENT_IP
LOAD_DATA_ADDRESS groupIndex,2
JUMP_TO_COMMON groupIndex,2
GET_CURRENT_IP
LOAD_DATA_ADDRESS groupIndex,3
JUMP_TO_COMMON groupIndex,3
GET_CURRENT_IP
LOAD_DATA_ADDRESS groupIndex,4
JUMP_TO_COMMON groupIndex,4
GET_CURRENT_IP
LOAD_DATA_ADDRESS groupIndex,5
JUMP_TO_COMMON groupIndex,5
GET_CURRENT_IP
LOAD_DATA_ADDRESS groupIndex,6
JUMP_TO_COMMON groupIndex,6
GET_CURRENT_IP
LOAD_DATA_ADDRESS groupIndex,7
JUMP_TO_COMMON groupIndex,7
GET_CURRENT_IP
LOAD_DATA_ADDRESS groupIndex,8
JUMP_TO_COMMON groupIndex,8
GET_CURRENT_IP
LOAD_DATA_ADDRESS groupIndex,9
JUMP_TO_COMMON groupIndex,9
endm
THUNKS_PAGE_BLOCK macro
TenThunks 0
TenThunks 1
TenThunks 2
TenThunks 3
TenThunks 4
TenThunks 5
TenThunks 6
TenThunks 7
TenThunks 8
TenThunks 9
TenThunks 10
TenThunks 11
endm
;;
;; The first thunks section should be 64K aligned because it can get
;; mapped multiple times in memory, and mapping works on allocation
;; granularity boundaries (we don't want to map more than what we need)
;;
;; The easiest way to do so is by having the thunks section at the
;; first 64K aligned virtual address in the binary. We provide a section
;; layout file to the linker to tell it how to layout the thunks sections
;; that we care about. (ndp\rh\src\runtime\DLLs\app\mrt100_app_sectionlayout.txt)
;;
;; The PE spec says images cannot have gaps between sections (other
;; than what is required by the section alignment value in the header),
;; therefore we need a couple of padding data sections (otherwise the
;; OS will not load the image).
;;
NAMED_READONLY_DATA_SECTION PaddingFor64KAlignment0, ".pad0"
NAMED_READONLY_DATA_SECTION PaddingFor64KAlignment1, ".pad1"
NAMED_READONLY_DATA_SECTION PaddingFor64KAlignment2, ".pad2"
NAMED_READONLY_DATA_SECTION PaddingFor64KAlignment3, ".pad3"
NAMED_READONLY_DATA_SECTION PaddingFor64KAlignment4, ".pad4"
NAMED_READONLY_DATA_SECTION PaddingFor64KAlignment5, ".pad5"
NAMED_READONLY_DATA_SECTION PaddingFor64KAlignment6, ".pad6"
NAMED_READONLY_DATA_SECTION PaddingFor64KAlignment7, ".pad7"
NAMED_READONLY_DATA_SECTION PaddingFor64KAlignment8, ".pad8"
NAMED_READONLY_DATA_SECTION PaddingFor64KAlignment9, ".pad9"
NAMED_READONLY_DATA_SECTION PaddingFor64KAlignment10, ".pad10"
NAMED_READONLY_DATA_SECTION PaddingFor64KAlignment11, ".pad11"
NAMED_READONLY_DATA_SECTION PaddingFor64KAlignment12, ".pad12"
NAMED_READONLY_DATA_SECTION PaddingFor64KAlignment13, ".pad13"
NAMED_READONLY_DATA_SECTION PaddingFor64KAlignment14, ".pad14"
;;
;; Thunk Stubs
;; NOTE: Keep number of blocks in sync with macro/constant named 'NUM_THUNK_BLOCKS' in:
;; - ndp\FxCore\src\System.Private.CoreLib\System\Runtime\InteropServices\ThunkPool.cs
;; - ndp\rh\src\tools\rhbind\zapimage.h
;;
NAMED_LEAF_ENTRY ThunkPool, TKS0, ".tks0"
THUNKS_PAGE_BLOCK
LEAF_END ThunkPool, TKS0
NAMED_READWRITE_DATA_SECTION ThunkData0, ".tkd0"
NAMED_LEAF_ENTRY ThunkPool1, TKS1, ".tks1"
THUNKS_PAGE_BLOCK
LEAF_END ThunkPool1, TKS1
NAMED_READWRITE_DATA_SECTION ThunkData1, ".tkd1"
NAMED_LEAF_ENTRY ThunkPool2, TKS2, ".tks2"
THUNKS_PAGE_BLOCK
LEAF_END ThunkPool2, TKS2
NAMED_READWRITE_DATA_SECTION ThunkData2, ".tkd2"
NAMED_LEAF_ENTRY ThunkPool3, TKS3, ".tks3"
THUNKS_PAGE_BLOCK
LEAF_END ThunkPool3, TKS3
NAMED_READWRITE_DATA_SECTION ThunkData3, ".tkd3"
NAMED_LEAF_ENTRY ThunkPool4, TKS4, ".tks4"
THUNKS_PAGE_BLOCK
LEAF_END ThunkPool4, TKS4
NAMED_READWRITE_DATA_SECTION ThunkData4, ".tkd4"
NAMED_LEAF_ENTRY ThunkPool5, TKS5, ".tks5"
THUNKS_PAGE_BLOCK
LEAF_END ThunkPool5, TKS5
NAMED_READWRITE_DATA_SECTION ThunkData5, ".tkd5"
NAMED_LEAF_ENTRY ThunkPool6, TKS6, ".tks6"
THUNKS_PAGE_BLOCK
LEAF_END ThunkPool6, TKS6
NAMED_READWRITE_DATA_SECTION ThunkData6, ".tkd6"
NAMED_LEAF_ENTRY ThunkPool7, TKS7, ".tks7"
THUNKS_PAGE_BLOCK
LEAF_END ThunkPool7, TKS7
NAMED_READWRITE_DATA_SECTION ThunkData7, ".tkd7"
;;
;; IntPtr RhpGetThunksBase()
;;
FASTCALL_FUNC RhpGetThunksBase, 0
;; Return the address of the first thunk pool to the caller (this is really the base address)
lea eax, [ThunkPool]
ret
FASTCALL_ENDFUNC
;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;; General Helpers ;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
;;
;; int RhpGetNumThunksPerBlock()
;;
FASTCALL_FUNC RhpGetNumThunksPerBlock, 0
mov eax, THUNK_POOL_NUM_THUNKS_PER_PAGE
ret
FASTCALL_ENDFUNC
;;
;; int RhpGetThunkSize()
;;
FASTCALL_FUNC RhpGetThunkSize, 0
mov eax, THUNK_CODESIZE
ret
FASTCALL_ENDFUNC
;;
;; int RhpGetNumThunkBlocksPerMapping()
;;
FASTCALL_FUNC RhpGetNumThunkBlocksPerMapping, 0
mov eax, 8
ret
FASTCALL_ENDFUNC
;;
;; int RhpGetThunkBlockSize
;;
FASTCALL_FUNC RhpGetThunkBlockSize, 0
mov eax, PAGE_SIZE * 2
ret
FASTCALL_ENDFUNC
;;
;; IntPtr RhpGetThunkDataBlockAddress(IntPtr thunkStubAddress)
;;
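;; The data page for a thunk block immediately follows its stubs page: the helper
;; below rounds the stub address down to a page boundary and adds PAGE_SIZE.
;; For example, a stub at offset 0234h within its stubs page resolves to the start
;; of the page right after it.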
FASTCALL_FUNC RhpGetThunkDataBlockAddress, 4
mov eax, ecx
mov ecx, PAGE_SIZE - 1
not ecx
and eax, ecx
add eax, PAGE_SIZE
ret
FASTCALL_ENDFUNC
;;
;; IntPtr RhpGetThunkStubsBlockAddress(IntPtr thunkDataAddress)
;;
FASTCALL_FUNC RhpGetThunkStubsBlockAddress, 4
mov eax, ecx
mov ecx, PAGE_SIZE - 1
not ecx
and eax, ecx
sub eax, PAGE_SIZE
ret
FASTCALL_ENDFUNC
end
./src/libraries/System.Runtime/tests/System/StringComparerTests.cs
// Licensed to the .NET Foundation under one or more agreements.
// The .NET Foundation licenses this file to you under the MIT license.
using System.Collections.Generic;
using System.Globalization;
using System.Reflection;
using Xunit;
namespace System.Tests
{
public class StringComparerTests
{
[Fact]
public void Create_InvalidArguments_Throws()
{
AssertExtensions.Throws<ArgumentNullException>("culture", () => StringComparer.Create(null, ignoreCase: true));
}
[Fact]
public void Create_CreatesValidComparer()
{
StringComparer c = StringComparer.Create(CultureInfo.InvariantCulture, ignoreCase: true);
Assert.NotNull(c);
Assert.True(c.Equals((object)"hello", (object)"HEllO"));
Assert.True(c.Equals("hello", "HEllO"));
Assert.False(c.Equals((object)"bello", (object)"HEllO"));
Assert.False(c.Equals("bello", "HEllO"));
c = StringComparer.Create(CultureInfo.InvariantCulture, ignoreCase: false);
Assert.NotNull(c);
#pragma warning disable 0618 // suppress obsolete warning for String.Copy
Assert.True(c.Equals((object)"hello", (object)string.Copy("hello")));
#pragma warning restore 0618 // restore warning when accessing obsolete members
Assert.False(c.Equals((object)"hello", (object)"HEllO"));
Assert.False(c.Equals("hello", "HEllO"));
Assert.False(c.Equals((object)"bello", (object)"HEllO"));
Assert.False(c.Equals("bello", "HEllO"));
object obj = new object();
Assert.Equal(c.GetHashCode((object)"hello"), c.GetHashCode((object)"hello"));
Assert.Equal(c.GetHashCode("hello"), c.GetHashCode("hello"));
Assert.Equal(c.GetHashCode("hello"), c.GetHashCode((object)"hello"));
Assert.Equal(obj.GetHashCode(), c.GetHashCode(obj));
Assert.Equal(42.CompareTo(84), c.Compare(42, 84));
Assert.Throws<ArgumentException>(() => c.Compare("42", 84));
Assert.Equal(1, c.Compare("42", null));
Assert.Throws<ArgumentException>(() => c.Compare(42, "84"));
}
[Fact]
public void Compare_InvalidArguments_Throws()
{
StringComparer c = StringComparer.Create(CultureInfo.InvariantCulture, ignoreCase: true);
Assert.Throws<ArgumentException>(() => c.Compare(new object(), 42));
}
[Fact]
public void GetHashCode_InvalidArguments_Throws()
{
StringComparer c = StringComparer.Create(CultureInfo.InvariantCulture, ignoreCase: true);
AssertExtensions.Throws<ArgumentNullException>("obj", () => c.GetHashCode(null));
AssertExtensions.Throws<ArgumentNullException>("obj", () => c.GetHashCode((object)null));
}
[Fact]
public void Compare_ViaSort_SortsAsExpected()
{
string[] strings = new[] { "a", "b", "AB", "A", "cde", "abc", "f", "123", "ab" };
Array.Sort(strings, StringComparer.OrdinalIgnoreCase);
Assert.Equal<string>(strings, new[] { "123", "a", "A", "AB", "ab", "abc", "b", "cde", "f" });
Array.Sort(strings, StringComparer.Ordinal);
Assert.Equal<string>(strings, new[] { "123", "A", "AB", "a", "ab", "abc", "b", "cde", "f" });
}
[Fact]
public void Compare_ExpectedResults()
{
StringComparer c = StringComparer.Ordinal;
Assert.Equal(0, c.Compare((object)"hello", (object)"hello"));
#pragma warning disable 0618 // suppress obsolete warning for String.Copy
Assert.Equal(0, c.Compare((object)"hello", (object)string.Copy("hello")));
#pragma warning restore 0618 // restore warning when accessing obsolete members
Assert.Equal(-1, c.Compare(null, (object)"hello"));
Assert.Equal(1, c.Compare((object)"hello", null));
Assert.InRange(c.Compare((object)"hello", (object)"world"), int.MinValue, -1);
Assert.InRange(c.Compare((object)"world", (object)"hello"), 1, int.MaxValue);
}
[Fact]
public void Equals_ExpectedResults()
{
StringComparer c = StringComparer.Ordinal;
Assert.True(c.Equals((object)null, (object)null));
Assert.True(c.Equals(null, null));
Assert.True(c.Equals((object)"hello", (object)"hello"));
Assert.True(c.Equals("hello", "hello"));
Assert.False(c.Equals((object)null, "hello"));
Assert.False(c.Equals(null, "hello"));
Assert.False(c.Equals("hello", (object)null));
Assert.False(c.Equals("hello", null));
Assert.True(c.Equals(42, 42));
Assert.False(c.Equals(42, 84));
Assert.False(c.Equals("42", 84));
Assert.False(c.Equals(42, "84"));
}
[Fact]
public void CreateCultureOptions_InvalidArguments_Throws()
{
Assert.Throws<ArgumentNullException>(() => StringComparer.Create(null, CompareOptions.None));
}
[Fact]
public void CreateCultureOptions_CreatesValidComparer()
{
StringComparer c = StringComparer.Create(CultureInfo.InvariantCulture, CompareOptions.IgnoreCase);
Assert.NotNull(c);
Assert.True(c.Equals((object)"hello", (object)"HEllO"));
Assert.True(c.Equals("hello", "HEllO"));
Assert.False(c.Equals((object)"bello", (object)"HEllO"));
Assert.False(c.Equals("bello", "HEllO"));
object obj = new object();
Assert.Equal(c.GetHashCode((object)"hello"), c.GetHashCode((object)"hello"));
Assert.Equal(c.GetHashCode("hello"), c.GetHashCode("hello"));
Assert.Equal(c.GetHashCode("hello"), c.GetHashCode((object)"hello"));
Assert.Equal(obj.GetHashCode(), c.GetHashCode(obj));
Assert.Equal(42.CompareTo(84), c.Compare(42, 84));
Assert.Throws<ArgumentException>(() => c.Compare("42", 84));
Assert.Equal(1, c.Compare("42", null));
Assert.Throws<ArgumentException>(() => c.Compare(42, "84"));
}
[Fact]
public void IsWellKnownOrdinalComparer_TestCases()
{
CompareInfo ci_enUS = CompareInfo.GetCompareInfo("en-US");
// First, instantiate and test the comparers directly
RunTest(null, false, false);
RunTest(EqualityComparer<string>.Default, true, false); // EC<string>.Default is Ordinal-equivalent
RunTest(EqualityComparer<object>.Default, false, false); // EC<object>.Default isn't a string comparer
RunTest(StringComparer.Ordinal, true, false);
RunTest(StringComparer.OrdinalIgnoreCase, true, true);
RunTest(StringComparer.InvariantCulture, false, false); // not ordinal
RunTest(StringComparer.InvariantCultureIgnoreCase, false, false); // not ordinal
RunTest(GetNonRandomizedComparer("WrappedAroundDefaultComparer"), true, false); // EC<string>.Default is Ordinal-equivalent
RunTest(GetNonRandomizedComparer("WrappedAroundStringComparerOrdinal"), true, false);
RunTest(GetNonRandomizedComparer("WrappedAroundStringComparerOrdinalIgnoreCase"), true, true);
RunTest(new CustomStringComparer(), false, false); // not an inbox comparer
RunTest(ci_enUS.GetStringComparer(CompareOptions.None), false, false); // linguistic
RunTest(ci_enUS.GetStringComparer(CompareOptions.Ordinal), true, false);
RunTest(ci_enUS.GetStringComparer(CompareOptions.OrdinalIgnoreCase), true, true);
// Then, make sure that this API works with common collection types
RunTest(new Dictionary<string, object>().Comparer, true, false);
RunTest(new Dictionary<string, object>(StringComparer.Ordinal).Comparer, true, false);
RunTest(new Dictionary<string, object>(StringComparer.OrdinalIgnoreCase).Comparer, true, true);
RunTest(new Dictionary<string, object>(StringComparer.InvariantCulture).Comparer, false, false);
RunTest(new Dictionary<string, object>(StringComparer.InvariantCultureIgnoreCase).Comparer, false, false);
RunTest(new HashSet<string>().Comparer, true, false);
RunTest(new HashSet<string>(StringComparer.Ordinal).Comparer, true, false);
RunTest(new HashSet<string>(StringComparer.OrdinalIgnoreCase).Comparer, true, true);
RunTest(new HashSet<string>(StringComparer.InvariantCulture).Comparer, false, false);
RunTest(new HashSet<string>(StringComparer.InvariantCultureIgnoreCase).Comparer, false, false);
static void RunTest(IEqualityComparer<string> comparer, bool expectedIsOrdinal, bool expectedIgnoreCase)
{
Assert.Equal(expectedIsOrdinal, StringComparer.IsWellKnownOrdinalComparer(comparer, out bool actualIgnoreCase));
Assert.Equal(expectedIgnoreCase, actualIgnoreCase);
}
}
[Fact]
public void IsWellKnownCultureAwareComparer_TestCases()
{
CompareInfo ci_enUS = CompareInfo.GetCompareInfo("en-US");
CompareInfo ci_inv = CultureInfo.InvariantCulture.CompareInfo;
// First, instantiate and test the comparers directly
RunTest(null, null, default);
RunTest(EqualityComparer<string>.Default, null, default); // EC<string>.Default is not culture-aware
RunTest(EqualityComparer<object>.Default, null, default); // EC<object>.Default isn't a string comparer
RunTest(StringComparer.Ordinal, null, default);
RunTest(StringComparer.OrdinalIgnoreCase, null, default);
RunTest(StringComparer.InvariantCulture, ci_inv, CompareOptions.None);
RunTest(StringComparer.InvariantCultureIgnoreCase, ci_inv, CompareOptions.IgnoreCase);
RunTest(GetNonRandomizedComparer("WrappedAroundDefaultComparer"), null, default); // EC<string>.Default is Ordinal-equivalent
RunTest(GetNonRandomizedComparer("WrappedAroundStringComparerOrdinal"), null, default);
RunTest(GetNonRandomizedComparer("WrappedAroundStringComparerOrdinalIgnoreCase"), null, default);
RunTest(new CustomStringComparer(), null, default); // not an inbox comparer
RunTest(ci_enUS.GetStringComparer(CompareOptions.None), ci_enUS, CompareOptions.None);
RunTest(ci_enUS.GetStringComparer(CompareOptions.IgnoreCase | CompareOptions.IgnoreKanaType), ci_enUS, CompareOptions.IgnoreCase | CompareOptions.IgnoreKanaType);
RunTest(ci_enUS.GetStringComparer(CompareOptions.Ordinal), null, default); // not linguistic
RunTest(ci_enUS.GetStringComparer(CompareOptions.OrdinalIgnoreCase), null, default); // not linguistic
RunTest(StringComparer.Create(CultureInfo.InvariantCulture, false), ci_inv, CompareOptions.None);
RunTest(StringComparer.Create(CultureInfo.InvariantCulture, true), ci_inv, CompareOptions.IgnoreCase);
RunTest(StringComparer.Create(CultureInfo.InvariantCulture, CompareOptions.IgnoreSymbols), ci_inv, CompareOptions.IgnoreSymbols);
// Then, make sure that this API works with common collection types
RunTest(new Dictionary<string, object>().Comparer, null, default);
RunTest(new Dictionary<string, object>(StringComparer.Ordinal).Comparer, null, default);
RunTest(new Dictionary<string, object>(StringComparer.OrdinalIgnoreCase).Comparer, null, default);
RunTest(new Dictionary<string, object>(StringComparer.InvariantCulture).Comparer, ci_inv, CompareOptions.None);
RunTest(new Dictionary<string, object>(StringComparer.InvariantCultureIgnoreCase).Comparer, ci_inv, CompareOptions.IgnoreCase);
RunTest(new HashSet<string>().Comparer, null, default);
RunTest(new HashSet<string>(StringComparer.Ordinal).Comparer, null, default);
RunTest(new HashSet<string>(StringComparer.OrdinalIgnoreCase).Comparer, null, default);
RunTest(new HashSet<string>(StringComparer.InvariantCulture).Comparer, ci_inv, CompareOptions.None);
RunTest(new HashSet<string>(StringComparer.InvariantCultureIgnoreCase).Comparer, ci_inv, CompareOptions.IgnoreCase);
static void RunTest(IEqualityComparer<string> comparer, CompareInfo expectedCompareInfo, CompareOptions expectedCompareOptions)
{
bool actualReturnValue = StringComparer.IsWellKnownCultureAwareComparer(comparer, out CompareInfo actualCompareInfo, out CompareOptions actualCompareOptions);
Assert.Equal(expectedCompareInfo != null, actualReturnValue);
Assert.Equal(expectedCompareInfo, actualCompareInfo);
Assert.Equal(expectedCompareOptions, actualCompareOptions);
}
}
private static IEqualityComparer<string> GetNonRandomizedComparer(string name)
{
Type nonRandomizedComparerType = typeof(StringComparer).Assembly.GetType("System.Collections.Generic.NonRandomizedStringEqualityComparer");
Assert.NotNull(nonRandomizedComparerType);
FieldInfo fi = nonRandomizedComparerType.GetField(name, BindingFlags.NonPublic | BindingFlags.Public | BindingFlags.Static);
Assert.NotNull(fi);
return (IEqualityComparer<string>)fi.GetValue(null);
}
private class CustomStringComparer : StringComparer
{
public override int Compare(string x, string y) => throw new NotImplementedException();
public override bool Equals(string x, string y) => throw new NotImplementedException();
public override int GetHashCode(string obj) => throw new NotImplementedException();
}
}
}
./src/tests/Interop/PInvoke/Primitives/Int/PInvokeIntTest.csproj
<Project Sdk="Microsoft.NET.Sdk">
<PropertyGroup>
<OutputType>Exe</OutputType>
</PropertyGroup>
<ItemGroup>
<Compile Include="PInvokeIntTest.cs" />
</ItemGroup>
<ItemGroup>
<CMakeProjectReference Include="CMakeLists.txt" />
</ItemGroup>
</Project>
./src/coreclr/md/enc/stgio.cpp
// Licensed to the .NET Foundation under one or more agreements.
// The .NET Foundation licenses this file to you under the MIT license.
//*****************************************************************************
// StgIO.cpp
//
//
// This module handles disk/memory i/o for a generic set of storage solutions,
// including:
// * File system handle (HFILE)
// * IStream
// * User supplied memory buffer (non-movable)
//
// The Read, Write, Seek, ... functions are all directed to the corresponding
// method for each type of file, allowing the consumer to use one set of api's.
//
// File system data can be paged fully into memory in two scenarios:
// read: Normal memory mapped file is created to manage paging.
// write: A custom paging system provides storage for pages as required. This
// data is invalidated when you call Rewrite on the file.
//
// Transactions and backups are handled in the existing file case only. The
// Rewrite function can make a backup of the current contents, and the Restore
// function can be used to recover the data into the current scope. The backup
// file is flushed to disk (which is slower but safer) after the copy. The
// Restore also flushes the recovered changes to disk. Worst case scenario you
// get a crash after calling Rewrite but before Restore, in which case you will
// have a foo.clb.txn file in the same directory as the source file, foo.clb in
// this example.
//<REVISIT_TODO>
// @FUTURE: issues,
// 1. For reading a .clb in an image, it would be great to memory map
// only the portion of the file with the .clb in it.
//</REVISIT_TODO>
//*****************************************************************************
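//
// Illustrative usage sketch: read-only open followed by direct pointer access.
// The flag value mirrors the read-only open used by LoadFileToMemory below; the
// constructor argument and exact flag constants are assumptions -- see stgio.h
// for the real declarations and defaults.
//
// StgIO sIO(true); // auto-map the view on open
// if (SUCCEEDED(sIO.Open(W("foo.clb"), STGIO_READ, NULL, 0, NULL, NULL)))
// {
// void *pData = NULL;
// ULONG cbData = 0;
// if (SUCCEEDED(sIO.MapFileToMem(pData, &cbData, NULL)))
// {
// // ... parse the format directly through pData / cbData ...
// }
// sIO.Close();
// }
//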
#include "stdafx.h" // Standard headers.
#include "stgio.h" // Our definitions.
#include "corerror.h"
#include "posterror.h"
#include "pedecoder.h"
#include "pedecoder.inl"
//********** Types. ***********************************************************
#define SMALL_ALLOC_MAP_SIZE (64 * 1024) // 64 kb is the minimum size of virtual
// memory you can allocate, so anything
// less is a waste of VM resources.
#define MIN_WRITE_CACHE_BYTES (16 * 1024) // 16 kb for a write back cache
//********** Locals. **********************************************************
HRESULT MapFileError(DWORD error);
static void *AllocateMemory(int iSize);
static void FreeMemory(void *pbData);
inline HRESULT MapFileError(DWORD error)
{
return (PostError(HRESULT_FROM_WIN32(error)));
}
// Static to class.
int StgIO::m_iPageSize=0; // Size of an OS page.
int StgIO::m_iCacheSize=0; // Size for the write cache.
//********** Code. ************************************************************
StgIO::StgIO(
bool bAutoMap) : // Memory map for read on open?
m_bAutoMap(bAutoMap)
{
CtorInit();
// If the system page size has not been queried, do so now.
if (m_iPageSize == 0)
{
SYSTEM_INFO sInfo; // Some O/S information.
// Query the system page size.
GetSystemInfo(&sInfo);
m_iPageSize = sInfo.dwPageSize;
m_iCacheSize = ((MIN_WRITE_CACHE_BYTES - 1) & ~(m_iPageSize - 1)) + m_iPageSize;
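// For example, with a 4 KB page size this yields a 16 KB cache;
// with 64 KB pages it rounds up to a single 64 KB page.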
}
}
void StgIO::CtorInit()
{
m_bWriteThrough = false;
m_bRewrite = false;
m_bFreeMem = false;
m_pIStream = 0;
m_hFile = INVALID_HANDLE_VALUE;
m_hModule = NULL;
m_hMapping = 0;
m_pBaseData = 0;
m_pData = 0;
m_cbData = 0;
m_fFlags = 0;
m_iType = STGIO_NODATA;
m_cbOffset = 0;
m_rgBuff = 0;
m_cbBuff = 0;
m_rgPageMap = 0;
m_FileType = FILETYPE_UNKNOWN;
m_cRef = 1;
m_mtMappedType = MTYPE_NOMAPPING;
}
StgIO::~StgIO()
{
if (m_rgBuff)
{
FreeMemory(m_rgBuff);
m_rgBuff = 0;
}
Close();
}
//*****************************************************************************
// Open the base file on top of: (a) file, (b) memory buffer, or (c) stream.
// If create flag is specified, then this will create a new file with the
// name supplied. No data is read from an opened file. You must call
// MapFileToMem before doing direct pointer access to the contents.
//*****************************************************************************
HRESULT StgIO::Open( // Return code.
LPCWSTR szName, // Name of the storage.
int fFlags, // How to open the file.
const void *pbBuff, // Optional buffer for memory.
ULONG cbBuff, // Size of buffer.
IStream *pIStream, // Stream for input.
LPSECURITY_ATTRIBUTES pAttributes) // Security token.
{
HRESULT hr;
// If we were given the storage memory to begin with, then use it.
if (pbBuff && cbBuff)
{
_ASSERTE((fFlags & DBPROP_TMODEF_WRITE) == 0);
// Save the memory address and size only. No handles.
m_pData = (void *) pbBuff;
m_cbData = cbBuff;
// All access to data will be by memory provided.
if ((fFlags & DBPROP_TMODEF_SHAREDMEM) == DBPROP_TMODEF_SHAREDMEM)
{
// We're taking ownership of this memory
m_pBaseData = m_pData;
m_iType = STGIO_SHAREDMEM;
}
else
{
m_iType = STGIO_MEM;
}
goto ErrExit;
}
// Check for data backed by a stream pointer.
else if (pIStream)
{
// If this is for the non-create case, get the size of existing data.
if ((fFlags & DBPROP_TMODEF_CREATE) == 0)
{
LARGE_INTEGER iMove = { { 0, 0 } };
ULARGE_INTEGER iSize;
// Need the size of the data so we can map it into memory.
if (FAILED(hr = pIStream->Seek(iMove, STREAM_SEEK_END, &iSize)))
return (hr);
m_cbData = iSize.u.LowPart;
}
// Else there is nothing.
else
m_cbData = 0;
// Save an addref'd copy of the stream.
m_pIStream = pIStream;
m_pIStream->AddRef();
// All access to data will be by memory provided.
m_iType = STGIO_STREAM;
goto ErrExit;
}
// If not on memory, we need a file to do a create/open.
if (!szName || !*szName)
{
return (PostError(E_INVALIDARG));
}
// Check for create of a new file.
else if (fFlags & DBPROP_TMODEF_CREATE)
{
//<REVISIT_TODO>@future: This could choose to open the file in write-through
// mode, which would provide better Durability (from ACID props),
// but would be much slower.</REVISIT_TODO>
// Create the new file, overwriting only if caller allows it.
if ((m_hFile = WszCreateFile(szName, GENERIC_READ | GENERIC_WRITE, 0, 0,
(fFlags & DBPROP_TMODEF_FAILIFTHERE) ? CREATE_NEW : CREATE_ALWAYS,
0, 0)) == INVALID_HANDLE_VALUE)
{
return (MapFileError(GetLastError()));
}
// Data will come from the file.
m_iType = STGIO_HFILE;
}
// For open in read mode, need to open the file on disk. If opening a shared
// memory view, it has to be opened already, so no file open.
else if ((fFlags & DBPROP_TMODEF_WRITE) == 0)
{
// We have not opened the file nor loaded it as module
_ASSERTE(m_hFile == INVALID_HANDLE_VALUE);
_ASSERTE(m_hModule == NULL);
// Open the file for read. Sharing is determined by caller, it can
// allow other readers or be exclusive.
DWORD dwFileSharingFlags = FILE_SHARE_DELETE;
if (!(fFlags & DBPROP_TMODEF_EXCLUSIVE))
{
dwFileSharingFlags |= FILE_SHARE_READ;
#if !defined(DACCESS_COMPILE) && !defined(TARGET_UNIX)
// PEDecoder is not defined in DAC
// We prefer to use LoadLibrary if we can because it will share already loaded images (used for execution)
// which saves virtual memory. We only do this if our caller has indicated that this PE file is trusted
// and thus it is OK to do LoadLibrary (note that we still only load it as a resource, which mitigates
// most of the security risk anyway).
if ((fFlags & DBPROP_TMODEF_TRYLOADLIBRARY) != 0)
{
m_hModule = WszLoadLibraryEx(szName, NULL, LOAD_LIBRARY_AS_IMAGE_RESOURCE);
if (m_hModule != NULL)
{
m_iType = STGIO_HMODULE;
m_mtMappedType = MTYPE_IMAGE;
// LoadLibraryEx returns 2 lowest bits indicating how the module was loaded
m_pBaseData = m_pData = (void *)(((INT_PTR)m_hModule) & ~(INT_PTR)0x3);
PEDecoder peDecoder;
if (SUCCEEDED(peDecoder.Init(
m_pBaseData,
false)) && // relocated
peDecoder.CheckNTHeaders())
{
m_cbData = peDecoder.GetNTHeaders32()->OptionalHeader.SizeOfImage;
}
else
{
// PEDecoder failed on loaded library, let's backout all our changes to this object
// and fall back to file mapping
m_iType = STGIO_NODATA;
m_mtMappedType = MTYPE_NOMAPPING;
m_pBaseData = m_pData = NULL;
FreeLibrary(m_hModule);
m_hModule = NULL;
}
}
}
#endif //!DACCESS_COMPILE && !TARGET_UNIX
}
if (m_hModule == NULL)
{ // We didn't get the loaded module (we either didn't want to or it failed)
HandleHolder hFile(WszCreateFile(szName,
GENERIC_READ,
dwFileSharingFlags,
0,
OPEN_EXISTING,
0,
0));
if (hFile == INVALID_HANDLE_VALUE)
return (MapFileError(GetLastError()));
// Get size of file.
m_cbData = ::SetFilePointer(hFile, 0, 0, FILE_END);
// Can't read anything from an empty file.
if (m_cbData == 0)
return (PostError(CLDB_E_NO_DATA));
// Data will come from the file.
m_hFile = hFile.Extract();
m_iType = STGIO_HFILE;
}
}
ErrExit:
// If we will ever write, then we need the buffer cache.
if (fFlags & DBPROP_TMODEF_WRITE)
{
// Allocate a cache buffer for writing.
if ((m_rgBuff = (BYTE *) AllocateMemory(m_iCacheSize)) == NULL)
{
Close();
return PostError(OutOfMemory());
}
m_cbBuff = 0;
}
// Save flags for later.
m_fFlags = fFlags;
if ((szName != NULL) && (*szName != 0))
{
WCHAR rcExt[_MAX_PATH];
SplitPath(szName, NULL, 0, NULL, 0, NULL, 0, rcExt, _MAX_PATH);
if (SString::_wcsicmp(rcExt, W(".obj")) == 0)
{
m_FileType = FILETYPE_NTOBJ;
}
else if (SString::_wcsicmp(rcExt, W(".tlb")) == 0)
{
m_FileType = FILETYPE_TLB;
}
}
// For auto map case, map the view of the file as part of open.
if (m_bAutoMap &&
(m_iType == STGIO_HFILE || m_iType == STGIO_STREAM) &&
!(fFlags & DBPROP_TMODEF_CREATE))
{
void * ptr;
ULONG cb;
if (FAILED(hr = MapFileToMem(ptr, &cb, pAttributes)))
{
Close();
return hr;
}
}
return S_OK;
} // StgIO::Open
//*****************************************************************************
// Shut down the file handles and allocated objects.
//*****************************************************************************
void StgIO::Close()
{
switch (m_iType)
{
// Free any allocated memory.
case STGIO_SHAREDMEM:
if (m_pBaseData != NULL)
{
CoTaskMemFree(m_pBaseData);
m_pBaseData = NULL;
break;
}
FALLTHROUGH;
case STGIO_MEM:
case STGIO_HFILEMEM:
if (m_bFreeMem && m_pBaseData)
{
FreeMemory(m_pBaseData);
m_pBaseData = m_pData = 0;
}
// Intentional fall through to file case, if we kept handle open.
FALLTHROUGH;
case STGIO_HFILE:
{
// Free the file handle.
if (m_hFile != INVALID_HANDLE_VALUE)
CloseHandle(m_hFile);
// If we allocated space for in memory paging, then free it.
}
break;
case STGIO_HMODULE:
{
if (m_hModule != NULL)
FreeLibrary(m_hModule);
m_hModule = NULL;
break;
}
// Free the stream pointer.
case STGIO_STREAM:
{
if (m_pIStream != NULL)
m_pIStream->Release();
}
break;
// Weird to shut down what you didn't open, isn't it? Allow for
// error case where dtor shuts down as an afterthought.
case STGIO_NODATA:
default:
return;
}
// Free any page map and base data.
FreePageMap();
// Reset state values so we don't get confused.
CtorInit();
}
//*****************************************************************************
// Called to read the data into allocated memory and release the backing store.
// Only available on read-only data.
//*****************************************************************************
HRESULT
StgIO::LoadFileToMemory()
{
HRESULT hr;
void *pData; // Allocated buffer for file.
ULONG cbData; // Size of the data.
ULONG cbRead = 0; // Data actually read.
// Make sure it is a read-only file.
if (m_fFlags & DBPROP_TMODEF_WRITE)
return E_INVALIDARG;
// Try to allocate the buffer.
cbData = m_cbData;
pData = AllocateMemory(cbData);
IfNullGo(pData);
// Try to read the file into the buffer.
IfFailGo(Read(pData, cbData, &cbRead));
if (cbData != cbRead)
{
_ASSERTE_MSG(FALSE, "Read didn't succeed.");
IfFailGo(CLDB_E_FILE_CORRUPT);
}
// Done with the old data.
Close();
// Open with new data.
hr = Open(NULL /* szName */, STGIO_READ, pData, cbData, NULL /* IStream* */, NULL /* lpSecurityAttributes */);
_ASSERTE(SUCCEEDED(hr)); // should not be a failure code path with open on buffer.
// Mark the new memory so that it will be freed later.
m_pBaseData = m_pData;
m_bFreeMem = true;
ErrExit:
if (FAILED(hr) && pData)
FreeMemory(pData);
return hr;
} // StgIO::LoadFileToMemory
//*****************************************************************************
// Read data from the storage source. This will handle all types of backing
// storage from mmf, streams, and file handles. No read ahead or MRU
// caching is done.
//*****************************************************************************
HRESULT StgIO::Read( // Return code.
void *pbBuff, // Write buffer here.
ULONG cbBuff, // How much to read.
ULONG *pcbRead) // How much read.
{
ULONG cbCopy; // For boundary checks.
void *pbData; // Data buffer for mem read.
HRESULT hr = S_OK;
// Validate arguments, don't call if you don't need to.
_ASSERTE(pbBuff != 0);
_ASSERTE(cbBuff > 0);
// Get the data based on type.
switch (m_iType)
{
// For data on file, there are two possibilities:
// (1) We have an in memory backing store we should use, or
// (2) We just need to read from the file.
case STGIO_HFILE:
case STGIO_HMODULE:
{
_ASSERTE((m_hFile != INVALID_HANDLE_VALUE) || (m_hModule != NULL));
// Backing store does its own paging.
if (IsBackingStore() || IsMemoryMapped())
{
// Force the data into memory.
if (FAILED(hr = GetPtrForMem(GetCurrentOffset(), cbBuff, pbData)))
goto ErrExit;
// Copy it back for the user and save the size.
memcpy(pbBuff, pbData, cbBuff);
if (pcbRead)
*pcbRead = cbBuff;
}
// If there is no backing store, this is just a read operation.
else
{
_ASSERTE((m_iType == STGIO_HFILE) && (m_hFile != INVALID_HANDLE_VALUE));
_ASSERTE(m_hModule == NULL);
ULONG cbTemp = 0;
if (!pcbRead)
pcbRead = &cbTemp;
hr = ReadFromDisk(pbBuff, cbBuff, pcbRead);
m_cbOffset += *pcbRead;
}
}
break;
// Data in a stream is always just read.
case STGIO_STREAM:
{
_ASSERTE((IStream *) m_pIStream);
if (!pcbRead)
pcbRead = &cbCopy;
*pcbRead = 0;
hr = m_pIStream->Read(pbBuff, cbBuff, pcbRead);
if (SUCCEEDED(hr))
m_cbOffset += *pcbRead;
}
break;
// Simply copy the data from our data.
case STGIO_MEM:
case STGIO_SHAREDMEM:
case STGIO_HFILEMEM:
{
_ASSERTE(m_pData && m_cbData);
// Check for read past end of buffer and adjust.
if (GetCurrentOffset() + cbBuff > m_cbData)
cbCopy = m_cbData - GetCurrentOffset();
else
cbCopy = cbBuff;
// Copy the data into the callers buffer.
memcpy(pbBuff, (void *) ((DWORD_PTR)m_pData + GetCurrentOffset()), cbCopy);
if (pcbRead)
*pcbRead = cbCopy;
// Save a logical offset.
m_cbOffset += cbCopy;
}
break;
case STGIO_NODATA:
default:
_ASSERTE(0);
break;
}
ErrExit:
return (hr);
}
//*****************************************************************************
// Write to disk. This function will cache up to a page of data in a buffer
// and periodically flush it on overflow and on explicit request. This makes it
// safe to do lots of small writes without too much performance overhead.
//*****************************************************************************
HRESULT StgIO::Write( // true/false.
const void *pbBuff, // Data to write.
ULONG cbWrite, // How much data to write.
ULONG *pcbWritten) // How much did get written.
{
ULONG cbWriteIn=cbWrite; // Track amount written.
ULONG cbCopy;
HRESULT hr = S_OK;
_ASSERTE(m_rgBuff != 0);
_ASSERTE(cbWrite);
while (cbWrite)
{
// In the case where the buffer is already huge, write the whole thing
// and avoid the cache.
if (m_cbBuff == 0 && cbWrite >= (ULONG) m_iPageSize)
{
if (SUCCEEDED(hr = WriteToDisk(pbBuff, cbWrite, pcbWritten)))
m_cbOffset += cbWrite;
break;
}
// Otherwise cache as much as we can and flush.
else
{
// Determine how much data goes into the cache buffer.
cbCopy = m_iPageSize - m_cbBuff;
cbCopy = min(cbCopy, cbWrite);
// Copy the data into the cache and adjust counts.
memcpy(&m_rgBuff[m_cbBuff], pbBuff, cbCopy);
pbBuff = (void *) ((DWORD_PTR)pbBuff + cbCopy);
m_cbBuff += cbCopy;
m_cbOffset += cbCopy;
cbWrite -= cbCopy;
// If there is enough data, then flush it to disk and reset count.
if (m_cbBuff >= (ULONG) m_iPageSize)
{
if (FAILED(hr = FlushCache()))
break;
}
}
}
// Return value for caller.
if (SUCCEEDED(hr) && pcbWritten)
*pcbWritten = cbWriteIn;
return (hr);
}
//*****************************************************************************
// Moves the file pointer to the new location. This handles the different
// types of storage systems.
//*****************************************************************************
HRESULT StgIO::Seek( // New offset.
int lVal, // How much to move.
ULONG fMoveType) // Direction, use Win32 FILE_xxxx.
{
ULONG cbRtn = 0;
HRESULT hr = NOERROR;
_ASSERTE(fMoveType >= FILE_BEGIN && fMoveType <= FILE_END);
// Action taken depends on type of storage.
switch (m_iType)
{
case STGIO_HFILE:
{
// Use the file system's move.
_ASSERTE(m_hFile != INVALID_HANDLE_VALUE);
cbRtn = ::SetFilePointer(m_hFile, lVal, 0, fMoveType);
// Save the location redundantly.
if (cbRtn != 0xffffffff)
{
// make sure that m_cbOffset will stay within range
if (cbRtn > m_cbData || cbRtn < 0)
{
IfFailGo(STG_E_INVALIDFUNCTION);
}
m_cbOffset = cbRtn;
}
}
break;
case STGIO_STREAM:
{
LARGE_INTEGER iMove;
ULARGE_INTEGER iNewLoc;
// Need a 64-bit int.
iMove.QuadPart = lVal;
// The move types are named differently, but have same value.
if (FAILED(hr = m_pIStream->Seek(iMove, fMoveType, &iNewLoc)))
return (hr);
// make sure that m_cbOffset will stay within range
if (iNewLoc.u.LowPart > m_cbData || iNewLoc.u.LowPart < 0)
IfFailGo(STG_E_INVALIDFUNCTION);
// Save off our new location.
m_cbOffset = iNewLoc.u.LowPart;
}
break;
case STGIO_MEM:
case STGIO_SHAREDMEM:
case STGIO_HFILEMEM:
case STGIO_HMODULE:
{
// We own the offset, so change our value.
switch (fMoveType)
{
case FILE_BEGIN:
// make sure that m_cbOffset will stay within range
if ((ULONG) lVal > m_cbData || lVal < 0)
{
IfFailGo(STG_E_INVALIDFUNCTION);
}
m_cbOffset = lVal;
break;
case FILE_CURRENT:
// make sure that m_cbOffset will stay within range
if (m_cbOffset + lVal > m_cbData)
{
IfFailGo(STG_E_INVALIDFUNCTION);
}
m_cbOffset = m_cbOffset + lVal;
break;
case FILE_END:
_ASSERTE(lVal < (LONG) m_cbData);
// make sure that m_cbOffset will stay within range
if (m_cbData + lVal > m_cbData)
{
IfFailGo(STG_E_INVALIDFUNCTION);
}
m_cbOffset = m_cbData + lVal;
break;
}
cbRtn = m_cbOffset;
}
break;
// Weird to seek with no data.
case STGIO_NODATA:
default:
_ASSERTE(0);
break;
}
ErrExit:
return hr;
}
//*****************************************************************************
// Retrieves the current offset for the storage being used. This value is
// tracked based on Read, Write, and Seek operations.
//*****************************************************************************
ULONG StgIO::GetCurrentOffset() // Current offset.
{
return (m_cbOffset);
}
//*****************************************************************************
// Map the file contents to a memory mapped file and return a pointer to the
// data. For read/write with a backing store, map the file using an internal
// paging system.
//*****************************************************************************
HRESULT StgIO::MapFileToMem( // Return code.
void *&ptr, // Return pointer to file data.
ULONG *pcbSize, // Return size of data.
LPSECURITY_ATTRIBUTES pAttributes) // Security token.
{
char rcShared[MAXSHMEM]; // ANSI version of shared name.
HRESULT hr = S_OK;
// Don't penalize for multiple calls. Also, allow calls for mem type so
// callers don't need to do so much checking.
if (IsBackingStore() ||
IsMemoryMapped() ||
(m_iType == STGIO_MEM) ||
(m_iType == STGIO_SHAREDMEM) ||
(m_iType == STGIO_HFILEMEM))
{
ptr = m_pData;
if (pcbSize)
*pcbSize = m_cbData;
return (S_OK);
}
//#CopySmallFiles
// Check the size of the data we want to map. If it is small enough, then
// simply allocate a chunk of memory from a finer grained heap. This saves
// virtual memory space, page table entries, and should reduce overall working set.
// Also, open for read/write needs a full backing store.
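// For example, a small .clb of only a few KB is simply heap-allocated and read
// in below rather than consuming a 64 KB allocation granule for a mapping.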
if ((m_cbData <= SMALL_ALLOC_MAP_SIZE) && (SMALL_ALLOC_MAP_SIZE > 0))
{
DWORD cbRead = m_cbData;
_ASSERTE(m_pData == 0);
// Just malloc a chunk of data to use.
m_pBaseData = m_pData = AllocateMemory(m_cbData);
if (!m_pData)
{
hr = OutOfMemory();
goto ErrExit;
}
// Read all of the file contents into this piece of memory.
IfFailGo( Seek(0, FILE_BEGIN) );
if (FAILED(hr = Read(m_pData, cbRead, &cbRead)))
{
FreeMemory(m_pData);
m_pData = 0;
goto ErrExit;
}
_ASSERTE(cbRead == m_cbData);
// If the file isn't being opened for exclusive mode, then free it.
// If it is for exclusive, then we need to keep the handle open so the
// file is locked, preventing other readers. Also leave it open if
// in read/write mode so we can truncate and rewrite.
if (m_hFile == INVALID_HANDLE_VALUE ||
((m_fFlags & DBPROP_TMODEF_EXCLUSIVE) == 0 && (m_fFlags & DBPROP_TMODEF_WRITE) == 0))
{
// If there was a handle open, then free it.
if (m_hFile != INVALID_HANDLE_VALUE)
{
VERIFY(CloseHandle(m_hFile));
m_hFile = INVALID_HANDLE_VALUE;
}
// Free the stream pointer.
else
if (m_pIStream != 0)
{
m_pIStream->Release();
m_pIStream = 0;
}
// Switch the type to memory only access.
m_iType = STGIO_MEM;
}
else
m_iType = STGIO_HFILEMEM;
// Free the memory when we shut down.
m_bFreeMem = true;
}
// Finally, a real mapping file must be created.
else
{
// Now we will map, so better have it right.
_ASSERTE(m_hFile != INVALID_HANDLE_VALUE || m_iType == STGIO_STREAM);
_ASSERTE(m_rgPageMap == 0);
// For read mode, use a memory mapped file since the size will never
// change for the life of the handle.
if ((m_fFlags & DBPROP_TMODEF_WRITE) == 0 && m_iType != STGIO_STREAM)
{
// Create a mapping object for the file.
_ASSERTE(m_hMapping == 0);
DWORD dwProtectionFlags = PAGE_READONLY;
if ((m_hMapping = WszCreateFileMapping(m_hFile, pAttributes, dwProtectionFlags,
0, 0, nullptr)) == 0)
{
return (MapFileError(GetLastError()));
}
m_mtMappedType = MTYPE_FLAT;
// Check to see if the memory already exists, in which case we have
// no guarantees it is the right piece of data.
if (GetLastError() == ERROR_ALREADY_EXISTS)
{
hr = PostError(CLDB_E_SMDUPLICATE, rcShared);
goto ErrExit;
}
// Now map the file into memory so we can read from pointer access.
// <REVISIT_TODO>Note: Added a check for IsBadReadPtr per the Services team which
// indicates that under some conditions this API can give you back
// a totally bogus pointer.</REVISIT_TODO>
if ((m_pBaseData = m_pData = MapViewOfFile(m_hMapping, FILE_MAP_READ,
0, 0, 0)) == 0)
{
hr = MapFileError(GetLastError());
if (SUCCEEDED(hr))
{
_ASSERTE_MSG(FALSE, "Error code doesn't indicate error.");
hr = PostError(CLDB_E_FILE_CORRUPT);
}
// In case we got back a bogus pointer.
m_pBaseData = m_pData = NULL;
goto ErrExit;
}
}
// In write mode, we need the hybrid combination of being able to back up
// the data in memory via cache, but then later rewrite the contents and
// throw away our cached copy. Memory mapped files are not good for this
// case due to poor write characteristics.
else
{
ULONG iMaxSize; // How much memory required for file.
// Figure out how many pages we'll require, round up actual data
// size to page size.
iMaxSize = (((m_cbData - 1) & ~(m_iPageSize - 1)) + m_iPageSize);
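// For example, with a 4 KB page size a 10,000 byte file rounds up to 12,288 bytes (three pages).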
// Check integer overflow in previous statement
if (iMaxSize < m_cbData)
{
IfFailGo(PostError(COR_E_OVERFLOW));
}
// Allocate a bit vector to track loaded pages.
if ((m_rgPageMap = new (nothrow) BYTE[iMaxSize / m_iPageSize]) == 0)
return (PostError(OutOfMemory()));
memset(m_rgPageMap, 0, sizeof(BYTE) * (iMaxSize / m_iPageSize));
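// The page map tracks, per page, whether that page has already been read in from disk.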
// Allocate space for the file contents.
if ((m_pBaseData = m_pData = ::ClrVirtualAlloc(0, iMaxSize, MEM_RESERVE, PAGE_NOACCESS)) == 0)
{
hr = PostError(OutOfMemory());
goto ErrExit;
}
}
}
// Reset any changes made by mapping.
IfFailGo( Seek(0, FILE_BEGIN) );
ErrExit:
// Check for errors and clean up.
if (FAILED(hr))
{
if (m_hMapping)
CloseHandle(m_hMapping);
m_hMapping = 0;
m_pBaseData = m_pData = 0;
m_cbData = 0;
}
ptr = m_pData;
if (pcbSize)
*pcbSize = m_cbData;
return (hr);
}
//*****************************************************************************
// Free the mapping object for shared memory but keep the rest of the internal
// state intact.
//*****************************************************************************
HRESULT StgIO::ReleaseMappingObject() // Return code.
{
// Check type first.
if (m_iType != STGIO_SHAREDMEM)
{
_ASSERTE(FALSE);
return S_OK;
}
// Must have an allocated handle.
_ASSERTE(m_hMapping != 0);
// Freeing the mapping object doesn't do any good if you still have the file.
_ASSERTE(m_hFile == INVALID_HANDLE_VALUE);
// Unmap the memory we allocated before freeing the handle. But keep the
// memory address intact.
if (m_pData)
VERIFY(UnmapViewOfFile(m_pData));
// Free the handle.
if (m_hMapping != 0)
{
VERIFY(CloseHandle(m_hMapping));
m_hMapping = 0;
}
return S_OK;
}
//*****************************************************************************
// Resets the logical base address and size to the value given. This is for
// cases like finding a section embedded in another format, like the .clb inside
// of an image. GetPtrForMem, Read, and Seek will then behave as though only
// data from pbStart to cbSize is valid.
//*****************************************************************************
HRESULT StgIO::SetBaseRange( // Return code.
void *pbStart, // Start of file data.
ULONG cbSize) // How big is the range.
{
if (m_iType == STGIO_SHAREDMEM)
{
// The base range must be inside of the current range.
_ASSERTE((m_pBaseData != NULL) && (m_cbData != 0));
_ASSERTE(((LONG_PTR) pbStart >= (LONG_PTR) m_pBaseData));
_ASSERTE(((LONG_PTR) pbStart + cbSize <= (LONG_PTR) m_pBaseData + m_cbData));
}
// Save the base range per user request.
m_pData = pbStart;
m_cbData = cbSize;
return S_OK;
}
//*****************************************************************************
// Caller wants a pointer to a chunk of the file. This function will make sure
// that the memory for that chunk has been committed and will load from the
// file if required. This algorithm attempts to load no more data from disk
// than is necessary. It walks the required pages from lowest to highest,
// and for each block of unloaded pages, the memory is committed and the data
// is read from disk. If all pages are unloaded, all of them are loaded at
// once to speed throughput from disk.
//*****************************************************************************
HRESULT StgIO::GetPtrForMem( // Return code.
ULONG cbStart, // Where to start getting memory.
ULONG cbSize, // How much data.
void *&ptr) // Return pointer to memory here.
{
int iFirst, iLast; // First and last page required.
ULONG iOffset, iSize; // For committing ranges of memory.
int i, j; // Loop control.
HRESULT hr;
// We need either memory (mmf or user supplied) or a backing store to
// return a pointer. Call Read if you don't have these.
if (!IsBackingStore() && m_pData == 0)
return (PostError(BadError(E_UNEXPECTED)));
// Validate the caller isn't asking for a data value out of range.
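// ClrSafeInt::addition returns false on unsigned overflow, so cbStart + cbSize is range-checked without wrapping.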
if (!(ClrSafeInt<ULONG>::addition(cbStart, cbSize, iOffset)
&& (iOffset <= m_cbData)))
return (PostError(E_INVALIDARG));
// This code will check for pages that need to be paged from disk in
// order for us to return a pointer to that memory.
if (IsBackingStore())
{
// Backing store is bogus when in rewrite mode.
if (m_bRewrite)
return (PostError(BadError(E_UNEXPECTED)));
// Must have the page map to continue.
_ASSERTE(m_rgPageMap && m_iPageSize && m_pData);
// Figure out the first and last page that are required for commit.
iFirst = cbStart / m_iPageSize;
iLast = (cbStart + cbSize - 1) / m_iPageSize;
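// For example, with 4 KB pages a request with cbStart=5000 and cbSize=3000 touches only page 1 (iFirst == iLast == 1).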
// Avoid confusion.
ptr = 0;
// Do a smart load of every page required. Do not reload pages that have
// already been brought in from disk.
//<REVISIT_TODO>@FUTURE: add an optimization so that when all pages have been faulted, we no
// longer do a page-by-page search.</REVISIT_TODO>
for (i=iFirst; i<=iLast; )
{
// Find the first page that hasn't already been loaded.
while (i<=iLast && GetBit(m_rgPageMap, i))
++i;
if (i > iLast)
break;
// Offset for first thing to load.
iOffset = i * m_iPageSize;
iSize = 0;
// See how many in a row have not been loaded.
for (j=i; i<=iLast && !GetBit(m_rgPageMap, i); i++)
{
// Safe: iSize += m_iPageSize;
if (!(ClrSafeInt<ULONG>::addition(iSize, m_iPageSize, iSize)))
{
return PostError(E_INVALIDARG);
}
}
// First commit the memory for this part of the file.
if (::ClrVirtualAlloc((void *) ((DWORD_PTR) m_pData + iOffset),
iSize, MEM_COMMIT, PAGE_READWRITE) == 0)
return (PostError(OutOfMemory()));
// Now load that portion of the file from disk.
if (FAILED(hr = Seek(iOffset, FILE_BEGIN)) ||
FAILED(hr = ReadFromDisk((void *) ((DWORD_PTR) m_pData + iOffset), iSize, 0)))
{
return (hr);
}
// Change the memory to read only to avoid any modifications. Any faults
// that occur indicate a bug whereby the engine is trying to write to
// protected memory.
_ASSERTE(::ClrVirtualAlloc((void *) ((DWORD_PTR) m_pData + iOffset),
iSize, MEM_COMMIT, PAGE_READONLY) != 0);
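// Note: _ASSERTE typically compiles away in retail builds, so this read-only re-protection is effectively a debug-only guard.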
// Record each new loaded page.
for (; j<i; j++)
SetBit(m_rgPageMap, j, true);
}
// Everything was brought into memory, so now return pointer to caller.
ptr = (void *) ((DWORD_PTR) m_pData + cbStart);
}
// Memory version or memory mapped file work the same way.
else if (IsMemoryMapped() ||
(m_iType == STGIO_MEM) ||
(m_iType == STGIO_SHAREDMEM) ||
(m_iType == STGIO_HFILEMEM))
{
if (!(cbStart <= m_cbData))
return (PostError(E_INVALIDARG));
ptr = (void *) ((DWORD_PTR) m_pData + cbStart);
}
// What's left?! Add some defense.
else
{
_ASSERTE(0);
ptr = 0;
return (PostError(BadError(E_UNEXPECTED)));
}
return (S_OK);
}
//*****************************************************************************
// For cached writes, flush the cache to the data store.
//*****************************************************************************
HRESULT StgIO::FlushCache()
{
ULONG cbWritten;
HRESULT hr;
if (m_cbBuff)
{
if (FAILED(hr = WriteToDisk(m_rgBuff, m_cbBuff, &cbWritten)))
return (hr);
m_cbBuff = 0;
}
return (S_OK);
}
//*****************************************************************************
// Tells the file system to flush any cached data it may have. This is
// expensive, but if successful guarantees you won't lose writes short of
// a disk failure.
//*****************************************************************************
HRESULT StgIO::FlushFileBuffers()
{
_ASSERTE(!IsReadOnly());
if (m_hFile != INVALID_HANDLE_VALUE)
{
if (::FlushFileBuffers(m_hFile))
return (S_OK);
else
return (MapFileError(GetLastError()));
}
return (S_OK);
}
//*****************************************************************************
// Called after a successful rewrite of an existing file. The in memory
// backing store is no longer valid because all new data is in memory and
// on disk. This is essentially the same state as created, so free up some
// working set and remember this state.
//*****************************************************************************
HRESULT StgIO::ResetBackingStore() // Return code.
{
// Don't be calling this function for read only data.
_ASSERTE(!IsReadOnly());
// Free up any backing store data we no longer need now that everything
// is in memory.
FreePageMap();
return (S_OK);
}
//
// Private.
//
//*****************************************************************************
// This version will force the data in cache out to disk for real. The code
// can handle the different types of storage we might be sitting on based on
// the open type.
//*****************************************************************************
HRESULT StgIO::WriteToDisk( // Return code.
const void *pbBuff, // Buffer to write.
ULONG cbWrite, // How much.
ULONG *pcbWritten) // Return how much written.
{
ULONG cbWritten; // Buffer for write funcs.
HRESULT hr = S_OK;
// Pretty obvious.
_ASSERTE(!IsReadOnly());
// Always need a buffer to write this data to.
if (!pcbWritten)
pcbWritten = &cbWritten;
// Action taken depends on type of storage.
switch (m_iType)
{
case STGIO_HFILE:
case STGIO_HFILEMEM:
{
// Use the file system's move.
_ASSERTE(m_hFile != INVALID_HANDLE_VALUE);
// Do the write to disk.
if (!::WriteFile(m_hFile, pbBuff, cbWrite, pcbWritten, 0))
hr = MapFileError(GetLastError());
}
break;
// Write to the stream.
case STGIO_STREAM:
{
// Delegate write to stream code.
hr = m_pIStream->Write(pbBuff, cbWrite, pcbWritten);
}
break;
// We cannot write to fixed read/only memory or LoadLibrary module.
case STGIO_HMODULE:
case STGIO_MEM:
case STGIO_SHAREDMEM:
_ASSERTE(0);
hr = BadError(E_UNEXPECTED);
break;
// Weird to write with no data.
case STGIO_NODATA:
default:
_ASSERTE(0);
break;
}
return (hr);
}
//*****************************************************************************
// This version only reads from disk.
//*****************************************************************************
HRESULT StgIO::ReadFromDisk( // Return code.
void *pbBuff, // Write buffer here.
ULONG cbBuff, // How much to read.
ULONG *pcbRead) // How much read.
{
ULONG cbRead;
_ASSERTE(m_iType == STGIO_HFILE || m_iType == STGIO_STREAM);
// Need to have a buffer.
if (!pcbRead)
pcbRead = &cbRead;
// Read only from file to avoid recursive logic.
if (m_iType == STGIO_HFILE || m_iType == STGIO_HFILEMEM)
{
if (::ReadFile(m_hFile, pbBuff, cbBuff, pcbRead, 0))
return (S_OK);
return (MapFileError(GetLastError()));
}
// Read directly from stream.
else
{
return (m_pIStream->Read(pbBuff, cbBuff, pcbRead));
}
}
//*****************************************************************************
// Copy the contents of the file for this storage to the target path.
//*****************************************************************************
HRESULT StgIO::CopyFileInternal( // Return code.
LPCWSTR szTo, // Target save path for file.
int bFailIfThere, // true to fail if target exists.
int bWriteThrough) // Should copy be written through OS cache.
{
DWORD iCurrent; // Save original location.
DWORD cbRead; // Byte count for buffer.
DWORD cbWrite; // Check write of bytes.
const DWORD cbBuff = 4096; // Size of buffer for copy (in bytes).
BYTE *pBuff = (BYTE*)alloca(cbBuff); // Buffer for copy.
HANDLE hFile; // Target file.
HRESULT hr = S_OK;
// Create target file.
if ((hFile = ::WszCreateFile(szTo, GENERIC_WRITE, 0, 0,
(bFailIfThere) ? CREATE_NEW : CREATE_ALWAYS,
(bWriteThrough) ? FILE_FLAG_WRITE_THROUGH : 0,
0)) == INVALID_HANDLE_VALUE)
{
return (MapFileError(GetLastError()));
}
// Save current location and reset it later.
iCurrent = ::SetFilePointer(m_hFile, 0, 0, FILE_CURRENT);
::SetFilePointer(m_hFile, 0, 0, FILE_BEGIN);
// Copy while there are bytes.
while (::ReadFile(m_hFile, pBuff, cbBuff, &cbRead, 0) && cbRead)
{
if (!::WriteFile(hFile, pBuff, cbRead, &cbWrite, 0) || cbWrite != cbRead)
{
hr = STG_E_WRITEFAULT;
break;
}
}
// Reset file offset.
::SetFilePointer(m_hFile, iCurrent, 0, FILE_BEGIN);
// Close target.
if (!bWriteThrough)
VERIFY(::FlushFileBuffers(hFile));
::CloseHandle(hFile);
return (hr);
}
//*****************************************************************************
// Free the data used for backing store from disk in read/write scenario.
//*****************************************************************************
void StgIO::FreePageMap()
{
// If a small file was allocated, then free that memory.
if (m_bFreeMem && m_pBaseData)
FreeMemory(m_pBaseData);
// For mmf, close handles and free resources.
else if (m_hMapping && m_pBaseData)
{
VERIFY(UnmapViewOfFile(m_pBaseData));
VERIFY(CloseHandle(m_hMapping));
}
// For our own system, free memory.
else if (m_rgPageMap && m_pBaseData)
{
delete [] m_rgPageMap;
m_rgPageMap = 0;
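// Note: MEM_RELEASE requires a size of zero, so the committed range is decommitted first and the reservation is then released separately.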
VERIFY(::ClrVirtualFree(m_pBaseData, (((m_cbData - 1) & ~(m_iPageSize - 1)) + m_iPageSize), MEM_DECOMMIT));
VERIFY(::ClrVirtualFree(m_pBaseData, 0, MEM_RELEASE));
m_pBaseData = 0;
m_cbData = 0;
}
m_pBaseData = 0;
m_hMapping = 0;
m_cbData = 0;
}
//*****************************************************************************
// Check the given pointer and ensure it is correctly aligned. Return true
// if it is aligned, false if it is not.
//*****************************************************************************
int StgIO::IsAlignedPtr(ULONG_PTR Value, int iAlignment)
{
HRESULT hr;
void *ptrStart = NULL;
if ((m_iType == STGIO_STREAM) ||
(m_iType == STGIO_SHAREDMEM) ||
(m_iType == STGIO_MEM))
{
return ((Value - (ULONG_PTR) m_pData) % iAlignment == 0);
}
else
{
hr = GetPtrForMem(0, 1, ptrStart);
_ASSERTE(hr == S_OK && "GetPtrForMem failed");
_ASSERTE(Value > (ULONG_PTR) ptrStart);
return (((Value - (ULONG_PTR) ptrStart) % iAlignment) == 0);
}
} // int StgIO::IsAlignedPtr()
//*****************************************************************************
// These helper functions are used to allocate fairly large pieces of memory,
// more than should be taken from the runtime heap, but less than would require
// virtual memory overhead.
//*****************************************************************************
// #define _TRACE_MEM_ 1
void *AllocateMemory(int iSize)
{
void * ptr;
ptr = new (nothrow) BYTE[iSize];
#if defined(_DEBUG) && defined(_TRACE_MEM_)
static int i=0;
DbgWriteEx(W("AllocateMemory: (%d) 0x%08x, size %d\n"), ++i, ptr, iSize);
#endif
return (ptr);
}
void FreeMemory(void *pbData)
{
#if defined(_DEBUG) && defined(_TRACE_MEM_)
static int i=0;
DbgWriteEx(W("FreeMemory: (%d) 0x%08x\n"), ++i, pbData);
#endif
_ASSERTE(pbData);
delete [] (BYTE *) pbData;
}
| // Licensed to the .NET Foundation under one or more agreements.
// The .NET Foundation licenses this file to you under the MIT license.
//*****************************************************************************
// StgIO.h
//
//
// This module handles disk/memory i/o for a generic set of storage solutions,
// including:
// * File system handle (HFILE)
// * IStream
// * User supplied memory buffer (non-movable)
//
// The Read, Write, Seek, ... functions are all directed to the corresponding
// method for each type of file, allowing the consumer to use one set of api's.
//
// File system data can be paged fully into memory in two scenarios:
// read: Normal memory mapped file is created to manage paging.
// write: A custom paging system provides storage for pages as required. This
// data is invalidated when you call Rewrite on the file.
//
// Transactions and backups are handled in the existing file case only. The
// Rewrite function can make a backup of the current contents, and the Restore
// function can be used to recover the data into the current scope. The backup
// file is flushed to disk (which is slower but safer) after the copy. The
// Restore also flushes the recovered changes to disk. Worst case scenario you
// get a crash after calling Rewrite but before Restore, in which case you will
// have a foo.clb.txn file in the same directory as the source file, foo.clb in
// this example.
//<REVISIT_TODO>
// @FUTURE: issues,
// 1. For reading a .clb in an image, it would be great to memory map
// only the portion of the file with the .clb in it.
//</REVISIT_TODO>
//*****************************************************************************
#include "stdafx.h" // Standard headers.
#include "stgio.h" // Our definitions.
#include "corerror.h"
#include "posterror.h"
#include "pedecoder.h"
#include "pedecoder.inl"
//********** Types. ***********************************************************
#define SMALL_ALLOC_MAP_SIZE (64 * 1024) // 64 kb is the minimum size of virtual
// memory you can allocate, so anything
// less is a waste of VM resources.
#define MIN_WRITE_CACHE_BYTES (16 * 1024) // 16 kb for a write back cache
//********** Locals. **********************************************************
HRESULT MapFileError(DWORD error);
static void *AllocateMemory(int iSize);
static void FreeMemory(void *pbData);
inline HRESULT MapFileError(DWORD error)
{
return (PostError(HRESULT_FROM_WIN32(error)));
}
// Static to class.
int StgIO::m_iPageSize=0; // Size of an OS page.
int StgIO::m_iCacheSize=0; // Size for the write cache.
//********** Code. ************************************************************
StgIO::StgIO(
bool bAutoMap) : // Memory map for read on open?
m_bAutoMap(bAutoMap)
{
CtorInit();
// If the system page size has not been queried, do so now.
if (m_iPageSize == 0)
{
SYSTEM_INFO sInfo; // Some O/S information.
// Query the system page size.
GetSystemInfo(&sInfo);
m_iPageSize = sInfo.dwPageSize;
m_iCacheSize = ((MIN_WRITE_CACHE_BYTES - 1) & ~(m_iPageSize - 1)) + m_iPageSize;
}
}
void StgIO::CtorInit()
{
m_bWriteThrough = false;
m_bRewrite = false;
m_bFreeMem = false;
m_pIStream = 0;
m_hFile = INVALID_HANDLE_VALUE;
m_hModule = NULL;
m_hMapping = 0;
m_pBaseData = 0;
m_pData = 0;
m_cbData = 0;
m_fFlags = 0;
m_iType = STGIO_NODATA;
m_cbOffset = 0;
m_rgBuff = 0;
m_cbBuff = 0;
m_rgPageMap = 0;
m_FileType = FILETYPE_UNKNOWN;
m_cRef = 1;
m_mtMappedType = MTYPE_NOMAPPING;
}
StgIO::~StgIO()
{
if (m_rgBuff)
{
FreeMemory(m_rgBuff);
m_rgBuff = 0;
}
Close();
}
//*****************************************************************************
// Open the base file on top of: (a) file, (b) memory buffer, or (c) stream.
// If create flag is specified, then this will create a new file with the
// name supplied. No data is read from an opened file. You must call
// MapFileToMem before doing direct pointer access to the contents.
//*****************************************************************************
HRESULT StgIO::Open( // Return code.
LPCWSTR szName, // Name of the storage.
int fFlags, // How to open the file.
const void *pbBuff, // Optional buffer for memory.
ULONG cbBuff, // Size of buffer.
IStream *pIStream, // Stream for input.
LPSECURITY_ATTRIBUTES pAttributes) // Security token.
{
HRESULT hr;
// If we were given the storage memory to begin with, then use it.
if (pbBuff && cbBuff)
{
_ASSERTE((fFlags & DBPROP_TMODEF_WRITE) == 0);
// Save the memory address and size only. No handles.
m_pData = (void *) pbBuff;
m_cbData = cbBuff;
// All access to data will be by memory provided.
if ((fFlags & DBPROP_TMODEF_SHAREDMEM) == DBPROP_TMODEF_SHAREDMEM)
{
// We're taking ownership of this memory
m_pBaseData = m_pData;
m_iType = STGIO_SHAREDMEM;
}
else
{
m_iType = STGIO_MEM;
}
goto ErrExit;
}
// Check for data backed by a stream pointer.
else if (pIStream)
{
// If this is for the non-create case, get the size of existing data.
if ((fFlags & DBPROP_TMODEF_CREATE) == 0)
{
LARGE_INTEGER iMove = { { 0, 0 } };
ULARGE_INTEGER iSize;
// Need the size of the data so we can map it into memory.
if (FAILED(hr = pIStream->Seek(iMove, STREAM_SEEK_END, &iSize)))
return (hr);
m_cbData = iSize.u.LowPart;
}
// Else there is nothing.
else
m_cbData = 0;
// Save an addref'd copy of the stream.
m_pIStream = pIStream;
m_pIStream->AddRef();
// All access to data will be by memory provided.
m_iType = STGIO_STREAM;
goto ErrExit;
}
// If not on memory, we need a file to do a create/open.
if (!szName || !*szName)
{
return (PostError(E_INVALIDARG));
}
// Check for create of a new file.
else if (fFlags & DBPROP_TMODEF_CREATE)
{
//<REVISIT_TODO>@future: This could choose to open the file in write through
// mode, which would provide better Durability (from ACID props),
// but would be much slower.</REVISIT_TODO>
// Create the new file, overwriting only if caller allows it.
if ((m_hFile = WszCreateFile(szName, GENERIC_READ | GENERIC_WRITE, 0, 0,
(fFlags & DBPROP_TMODEF_FAILIFTHERE) ? CREATE_NEW : CREATE_ALWAYS,
0, 0)) == INVALID_HANDLE_VALUE)
{
return (MapFileError(GetLastError()));
}
// Data will come from the file.
m_iType = STGIO_HFILE;
}
// For open in read mode, need to open the file on disk. If opening a shared
// memory view, it has to be opened already, so no file open.
else if ((fFlags & DBPROP_TMODEF_WRITE) == 0)
{
// We have not opened the file nor loaded it as module
_ASSERTE(m_hFile == INVALID_HANDLE_VALUE);
_ASSERTE(m_hModule == NULL);
// Open the file for read. Sharing is determined by caller, it can
// allow other readers or be exclusive.
DWORD dwFileSharingFlags = FILE_SHARE_DELETE;
if (!(fFlags & DBPROP_TMODEF_EXCLUSIVE))
{
dwFileSharingFlags |= FILE_SHARE_READ;
#if !defined(DACCESS_COMPILE) && !defined(TARGET_UNIX)
// PEDecoder is not defined in DAC
// We prefer to use LoadLibrary if we can because it will share already loaded images (used for execution)
// which saves virtual memory. We only do this if our caller has indicated that this PE file is trusted
// and thus it is OK to do LoadLibrary (note that we still only load it as a resource, which mitigates
// most of the security risk anyway).
if ((fFlags & DBPROP_TMODEF_TRYLOADLIBRARY) != 0)
{
m_hModule = WszLoadLibraryEx(szName, NULL, LOAD_LIBRARY_AS_IMAGE_RESOURCE);
if (m_hModule != NULL)
{
m_iType = STGIO_HMODULE;
m_mtMappedType = MTYPE_IMAGE;
// The two lowest bits of the handle returned by LoadLibraryEx indicate how the module was loaded
m_pBaseData = m_pData = (void *)(((INT_PTR)m_hModule) & ~(INT_PTR)0x3);
PEDecoder peDecoder;
if (SUCCEEDED(peDecoder.Init(
m_pBaseData,
false)) && // relocated
peDecoder.CheckNTHeaders())
{
m_cbData = peDecoder.GetNTHeaders32()->OptionalHeader.SizeOfImage;
}
else
{
// PEDecoder failed on loaded library, let's backout all our changes to this object
// and fall back to file mapping
m_iType = STGIO_NODATA;
m_mtMappedType = MTYPE_NOMAPPING;
m_pBaseData = m_pData = NULL;
FreeLibrary(m_hModule);
m_hModule = NULL;
}
}
}
#endif //!DACCESS_COMPILE && !TARGET_UNIX
}
if (m_hModule == NULL)
{ // We didn't get the loaded module (we either didn't want to or it failed)
HandleHolder hFile(WszCreateFile(szName,
GENERIC_READ,
dwFileSharingFlags,
0,
OPEN_EXISTING,
0,
0));
if (hFile == INVALID_HANDLE_VALUE)
return (MapFileError(GetLastError()));
// Get size of file.
m_cbData = ::SetFilePointer(hFile, 0, 0, FILE_END);
// Can't read anything from an empty file.
if (m_cbData == 0)
return (PostError(CLDB_E_NO_DATA));
// Data will come from the file.
m_hFile = hFile.Extract();
m_iType = STGIO_HFILE;
}
}
ErrExit:
// If we will ever write, then we need the buffer cache.
if (fFlags & DBPROP_TMODEF_WRITE)
{
// Allocate a cache buffer for writing.
if ((m_rgBuff = (BYTE *) AllocateMemory(m_iCacheSize)) == NULL)
{
Close();
return PostError(OutOfMemory());
}
m_cbBuff = 0;
}
// Save flags for later.
m_fFlags = fFlags;
if ((szName != NULL) && (*szName != 0))
{
WCHAR rcExt[_MAX_PATH];
SplitPath(szName, NULL, 0, NULL, 0, NULL, 0, rcExt, _MAX_PATH);
if (SString::_wcsicmp(rcExt, W(".obj")) == 0)
{
m_FileType = FILETYPE_NTOBJ;
}
else if (SString::_wcsicmp(rcExt, W(".tlb")) == 0)
{
m_FileType = FILETYPE_TLB;
}
}
// For auto map case, map the view of the file as part of open.
if (m_bAutoMap &&
(m_iType == STGIO_HFILE || m_iType == STGIO_STREAM) &&
!(fFlags & DBPROP_TMODEF_CREATE))
{
void * ptr;
ULONG cb;
if (FAILED(hr = MapFileToMem(ptr, &cb, pAttributes)))
{
Close();
return hr;
}
}
return S_OK;
} // StgIO::Open
//*****************************************************************************
// Shut down the file handles and allocated objects.
//*****************************************************************************
void StgIO::Close()
{
switch (m_iType)
{
// Free any allocated memory.
case STGIO_SHAREDMEM:
if (m_pBaseData != NULL)
{
CoTaskMemFree(m_pBaseData);
m_pBaseData = NULL;
break;
}
FALLTHROUGH;
case STGIO_MEM:
case STGIO_HFILEMEM:
if (m_bFreeMem && m_pBaseData)
{
FreeMemory(m_pBaseData);
m_pBaseData = m_pData = 0;
}
// Intentional fall through to file case, if we kept handle open.
FALLTHROUGH;
case STGIO_HFILE:
{
// Free the file handle.
if (m_hFile != INVALID_HANDLE_VALUE)
CloseHandle(m_hFile);
// If we allocated space for in memory paging, then free it.
}
break;
case STGIO_HMODULE:
{
if (m_hModule != NULL)
FreeLibrary(m_hModule);
m_hModule = NULL;
break;
}
// Free the stream pointer.
case STGIO_STREAM:
{
if (m_pIStream != NULL)
m_pIStream->Release();
}
break;
// Weird to shut down what you didn't open, isn't it? Allow for
// error case where dtor shuts down as an afterthought.
case STGIO_NODATA:
default:
return;
}
// Free any page map and base data.
FreePageMap();
// Reset state values so we don't get confused.
CtorInit();
}
//*****************************************************************************
// Called to read the data into allocated memory and release the backing store.
// Only available on read-only data.
//*****************************************************************************
HRESULT
StgIO::LoadFileToMemory()
{
HRESULT hr;
void *pData; // Allocated buffer for file.
ULONG cbData; // Size of the data.
ULONG cbRead = 0; // Data actually read.
// Make sure it is a read-only file.
if (m_fFlags & DBPROP_TMODEF_WRITE)
return E_INVALIDARG;
// Try to allocate the buffer.
cbData = m_cbData;
pData = AllocateMemory(cbData);
IfNullGo(pData);
// Try to read the file into the buffer.
IfFailGo(Read(pData, cbData, &cbRead));
if (cbData != cbRead)
{
_ASSERTE_MSG(FALSE, "Read didn't succeed.");
IfFailGo(CLDB_E_FILE_CORRUPT);
}
// Done with the old data.
Close();
// Open with new data.
hr = Open(NULL /* szName */, STGIO_READ, pData, cbData, NULL /* IStream* */, NULL /* lpSecurityAttributes */);
_ASSERTE(SUCCEEDED(hr)); // should not be a failure code path with open on buffer.
// Mark the new memory so that it will be freed later.
m_pBaseData = m_pData;
m_bFreeMem = true;
ErrExit:
if (FAILED(hr) && pData)
FreeMemory(pData);
return hr;
} // StgIO::LoadFileToMemory
//*****************************************************************************
// Read data from the storage source. This will handle all types of backing
// storage from mmf, streams, and file handles. No read ahead or MRU
// caching is done.
//*****************************************************************************
HRESULT StgIO::Read( // Return code.
void *pbBuff, // Write buffer here.
ULONG cbBuff, // How much to read.
ULONG *pcbRead) // How much read.
{
ULONG cbCopy; // For boundary checks.
void *pbData; // Data buffer for mem read.
HRESULT hr = S_OK;
// Validate arguments, don't call if you don't need to.
_ASSERTE(pbBuff != 0);
_ASSERTE(cbBuff > 0);
// Get the data based on type.
switch (m_iType)
{
// For data on file, there are two possibilities:
// (1) We have an in memory backing store we should use, or
// (2) We just need to read from the file.
case STGIO_HFILE:
case STGIO_HMODULE:
{
_ASSERTE((m_hFile != INVALID_HANDLE_VALUE) || (m_hModule != NULL));
// Backing store does its own paging.
if (IsBackingStore() || IsMemoryMapped())
{
// Force the data into memory.
if (FAILED(hr = GetPtrForMem(GetCurrentOffset(), cbBuff, pbData)))
goto ErrExit;
// Copy it back for the user and save the size.
memcpy(pbBuff, pbData, cbBuff);
if (pcbRead)
*pcbRead = cbBuff;
}
// If there is no backing store, this is just a read operation.
else
{
_ASSERTE((m_iType == STGIO_HFILE) && (m_hFile != INVALID_HANDLE_VALUE));
_ASSERTE(m_hModule == NULL);
ULONG cbTemp = 0;
if (!pcbRead)
pcbRead = &cbTemp;
hr = ReadFromDisk(pbBuff, cbBuff, pcbRead);
m_cbOffset += *pcbRead;
}
}
break;
// Data in a stream is always just read.
case STGIO_STREAM:
{
_ASSERTE((IStream *) m_pIStream);
if (!pcbRead)
pcbRead = &cbCopy;
*pcbRead = 0;
hr = m_pIStream->Read(pbBuff, cbBuff, pcbRead);
if (SUCCEEDED(hr))
m_cbOffset += *pcbRead;
}
break;
// Simply copy the data from our data.
case STGIO_MEM:
case STGIO_SHAREDMEM:
case STGIO_HFILEMEM:
{
_ASSERTE(m_pData && m_cbData);
// Check for read past end of buffer and adjust.
if (GetCurrentOffset() + cbBuff > m_cbData)
cbCopy = m_cbData - GetCurrentOffset();
else
cbCopy = cbBuff;
// Copy the data into the callers buffer.
memcpy(pbBuff, (void *) ((DWORD_PTR)m_pData + GetCurrentOffset()), cbCopy);
if (pcbRead)
*pcbRead = cbCopy;
// Save a logical offset.
m_cbOffset += cbCopy;
}
break;
case STGIO_NODATA:
default:
_ASSERTE(0);
break;
}
ErrExit:
return (hr);
}
//*****************************************************************************
// Write to disk. This function will cache up to a page of data in a buffer
// and periodically flush it on overflow and explicit request. This makes it
// safe to do lots of small writes without too much performance overhead.
//*****************************************************************************
HRESULT StgIO::Write( // true/false.
const void *pbBuff, // Data to write.
ULONG cbWrite, // How much data to write.
ULONG *pcbWritten) // How much did get written.
{
ULONG cbWriteIn=cbWrite; // Track amount written.
ULONG cbCopy;
HRESULT hr = S_OK;
_ASSERTE(m_rgBuff != 0);
_ASSERTE(cbWrite);
while (cbWrite)
{
// In the case where the buffer is already huge, write the whole thing
// and avoid the cache.
if (m_cbBuff == 0 && cbWrite >= (ULONG) m_iPageSize)
{
if (SUCCEEDED(hr = WriteToDisk(pbBuff, cbWrite, pcbWritten)))
m_cbOffset += cbWrite;
break;
}
// Otherwise cache as much as we can and flush.
else
{
// Determine how much data goes into the cache buffer.
cbCopy = m_iPageSize - m_cbBuff;
cbCopy = min(cbCopy, cbWrite);
// Copy the data into the cache and adjust counts.
memcpy(&m_rgBuff[m_cbBuff], pbBuff, cbCopy);
pbBuff = (void *) ((DWORD_PTR)pbBuff + cbCopy);
m_cbBuff += cbCopy;
m_cbOffset += cbCopy;
cbWrite -= cbCopy;
// If there is enough data, then flush it to disk and reset count.
if (m_cbBuff >= (ULONG) m_iPageSize)
{
if (FAILED(hr = FlushCache()))
break;
}
}
}
// Return value for caller.
if (SUCCEEDED(hr) && pcbWritten)
*pcbWritten = cbWriteIn;
return (hr);
}
//*****************************************************************************
// Moves the file pointer to the new location. This handles the different
// types of storage systems.
//*****************************************************************************
HRESULT StgIO::Seek( // New offset.
int lVal, // How much to move.
ULONG fMoveType) // Direction, use Win32 FILE_xxxx.
{
ULONG cbRtn = 0;
HRESULT hr = NOERROR;
_ASSERTE(fMoveType >= FILE_BEGIN && fMoveType <= FILE_END);
// Action taken depends on type of storage.
switch (m_iType)
{
case STGIO_HFILE:
{
// Use the file system's move.
_ASSERTE(m_hFile != INVALID_HANDLE_VALUE);
cbRtn = ::SetFilePointer(m_hFile, lVal, 0, fMoveType);
// Save the location redundantly.
if (cbRtn != 0xffffffff)
{
// make sure that m_cbOffset will stay within range
if (cbRtn > m_cbData || cbRtn < 0)
{
IfFailGo(STG_E_INVALIDFUNCTION);
}
m_cbOffset = cbRtn;
}
}
break;
case STGIO_STREAM:
{
LARGE_INTEGER iMove;
ULARGE_INTEGER iNewLoc;
// Need a 64-bit int.
iMove.QuadPart = lVal;
// The move types are named differently, but have the same value.
if (FAILED(hr = m_pIStream->Seek(iMove, fMoveType, &iNewLoc)))
return (hr);
// make sure that m_cbOffset will stay within range
if (iNewLoc.u.LowPart > m_cbData || iNewLoc.u.LowPart < 0)
IfFailGo(STG_E_INVALIDFUNCTION);
// Save off only our location.
m_cbOffset = iNewLoc.u.LowPart;
}
break;
case STGIO_MEM:
case STGIO_SHAREDMEM:
case STGIO_HFILEMEM:
case STGIO_HMODULE:
{
// We own the offset, so change our value.
switch (fMoveType)
{
case FILE_BEGIN:
// make sure that m_cbOffset will stay within range
if ((ULONG) lVal > m_cbData || lVal < 0)
{
IfFailGo(STG_E_INVALIDFUNCTION);
}
m_cbOffset = lVal;
break;
case FILE_CURRENT:
// make sure that m_cbOffset will stay within range
if (m_cbOffset + lVal > m_cbData)
{
IfFailGo(STG_E_INVALIDFUNCTION);
}
m_cbOffset = m_cbOffset + lVal;
break;
case FILE_END:
_ASSERTE(lVal < (LONG) m_cbData);
// make sure that m_cbOffset will stay within range
if (m_cbData + lVal > m_cbData)
{
IfFailGo(STG_E_INVALIDFUNCTION);
}
m_cbOffset = m_cbData + lVal;
break;
}
cbRtn = m_cbOffset;
}
break;
// Weird to seek with no data.
case STGIO_NODATA:
default:
_ASSERTE(0);
break;
}
ErrExit:
return hr;
}
//*****************************************************************************
// Retrieves the current offset for the storage being used. This value is
// tracked based on Read, Write, and Seek operations.
//*****************************************************************************
ULONG StgIO::GetCurrentOffset() // Current offset.
{
return (m_cbOffset);
}
//*****************************************************************************
// Map the file contents to a memory mapped file and return a pointer to the
// data. For read/write with a backing store, map the file using an internal
// paging system.
//*****************************************************************************
HRESULT StgIO::MapFileToMem( // Return code.
void *&ptr, // Return pointer to file data.
ULONG *pcbSize, // Return size of data.
LPSECURITY_ATTRIBUTES pAttributes) // Security token.
{
char rcShared[MAXSHMEM]; // ANSI version of shared name.
HRESULT hr = S_OK;
// Don't penalize for multiple calls. Also, allow calls for mem type so
// callers don't need to do so much checking.
if (IsBackingStore() ||
IsMemoryMapped() ||
(m_iType == STGIO_MEM) ||
(m_iType == STGIO_SHAREDMEM) ||
(m_iType == STGIO_HFILEMEM))
{
ptr = m_pData;
if (pcbSize)
*pcbSize = m_cbData;
return (S_OK);
}
//#CopySmallFiles
// Check the size of the data we want to map. If it is small enough, then
// simply allocate a chunk of memory from a finer grained heap. This saves
// virtual memory space, page table entries, and should reduce overall working set.
// Also, open for read/write needs a full backing store.
if ((m_cbData <= SMALL_ALLOC_MAP_SIZE) && (SMALL_ALLOC_MAP_SIZE > 0))
{
DWORD cbRead = m_cbData;
_ASSERTE(m_pData == 0);
// Just malloc a chunk of data to use.
m_pBaseData = m_pData = AllocateMemory(m_cbData);
if (!m_pData)
{
hr = OutOfMemory();
goto ErrExit;
}
// Read all of the file contents into this piece of memory.
IfFailGo( Seek(0, FILE_BEGIN) );
if (FAILED(hr = Read(m_pData, cbRead, &cbRead)))
{
FreeMemory(m_pData);
m_pData = 0;
goto ErrExit;
}
_ASSERTE(cbRead == m_cbData);
// If the file isn't being opened for exclusive mode, then free it.
// If it is for exclusive, then we need to keep the handle open so the
// file is locked, preventing other readers. Also leave it open if
// in read/write mode so we can truncate and rewrite.
if (m_hFile == INVALID_HANDLE_VALUE ||
((m_fFlags & DBPROP_TMODEF_EXCLUSIVE) == 0 && (m_fFlags & DBPROP_TMODEF_WRITE) == 0))
{
// If there was a handle open, then free it.
if (m_hFile != INVALID_HANDLE_VALUE)
{
VERIFY(CloseHandle(m_hFile));
m_hFile = INVALID_HANDLE_VALUE;
}
// Free the stream pointer.
else
if (m_pIStream != 0)
{
m_pIStream->Release();
m_pIStream = 0;
}
// Switch the type to memory only access.
m_iType = STGIO_MEM;
}
else
m_iType = STGIO_HFILEMEM;
// Free the memory when we shut down.
m_bFreeMem = true;
}
// Finally, a real mapping file must be created.
else
{
// Now we will map, so better have it right.
_ASSERTE(m_hFile != INVALID_HANDLE_VALUE || m_iType == STGIO_STREAM);
_ASSERTE(m_rgPageMap == 0);
// For read mode, use a memory mapped file since the size will never
// change for the life of the handle.
if ((m_fFlags & DBPROP_TMODEF_WRITE) == 0 && m_iType != STGIO_STREAM)
{
// Create a mapping object for the file.
_ASSERTE(m_hMapping == 0);
DWORD dwProtectionFlags = PAGE_READONLY;
if ((m_hMapping = WszCreateFileMapping(m_hFile, pAttributes, dwProtectionFlags,
0, 0, nullptr)) == 0)
{
return (MapFileError(GetLastError()));
}
m_mtMappedType = MTYPE_FLAT;
// Check to see if the memory already exists, in which case we have
// no guarantees it is the right piece of data.
if (GetLastError() == ERROR_ALREADY_EXISTS)
{
hr = PostError(CLDB_E_SMDUPLICATE, rcShared);
goto ErrExit;
}
// Now map the file into memory so we can read from pointer access.
// <REVISIT_TODO>Note: Added a check for IsBadReadPtr per the Services team which
// indicates that under some conditions this API can give you back
// a totally bogus pointer.</REVISIT_TODO>
if ((m_pBaseData = m_pData = MapViewOfFile(m_hMapping, FILE_MAP_READ,
0, 0, 0)) == 0)
{
hr = MapFileError(GetLastError());
if (SUCCEEDED(hr))
{
_ASSERTE_MSG(FALSE, "Error code doesn't indicate error.");
hr = PostError(CLDB_E_FILE_CORRUPT);
}
// In case we got back a bogus pointer.
m_pBaseData = m_pData = NULL;
goto ErrExit;
}
}
// In write mode, we need the hybrid combination of being able to back up
// the data in memory via cache, but then later rewrite the contents and
// throw away our cached copy. Memory mapped files are not good for this
// case due to poor write characteristics.
else
{
ULONG iMaxSize; // How much memory required for file.
// Figure out how many pages we'll require, round up actual data
// size to page size.
iMaxSize = (((m_cbData - 1) & ~(m_iPageSize - 1)) + m_iPageSize);
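// For example, with a 4 KB page size a 10,000 byte file rounds up to 12,288 bytes (three pages).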
// Check integer overflow in previous statement
if (iMaxSize < m_cbData)
{
IfFailGo(PostError(COR_E_OVERFLOW));
}
// Allocate a bit vector to track loaded pages.
if ((m_rgPageMap = new (nothrow) BYTE[iMaxSize / m_iPageSize]) == 0)
return (PostError(OutOfMemory()));
memset(m_rgPageMap, 0, sizeof(BYTE) * (iMaxSize / m_iPageSize));
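// The page map tracks, per page, whether that page has already been read in from disk.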
// Allocate space for the file contents.
if ((m_pBaseData = m_pData = ::ClrVirtualAlloc(0, iMaxSize, MEM_RESERVE, PAGE_NOACCESS)) == 0)
{
hr = PostError(OutOfMemory());
goto ErrExit;
}
}
}
// Reset any changes made by mapping.
IfFailGo( Seek(0, FILE_BEGIN) );
ErrExit:
// Check for errors and clean up.
if (FAILED(hr))
{
if (m_hMapping)
CloseHandle(m_hMapping);
m_hMapping = 0;
m_pBaseData = m_pData = 0;
m_cbData = 0;
}
ptr = m_pData;
if (pcbSize)
*pcbSize = m_cbData;
return (hr);
}
//*****************************************************************************
// Free the mapping object for shared memory but keep the rest of the internal
// state intact.
//*****************************************************************************
HRESULT StgIO::ReleaseMappingObject() // Return code.
{
// Check type first.
if (m_iType != STGIO_SHAREDMEM)
{
_ASSERTE(FALSE);
return S_OK;
}
// Must have an allocated handle.
_ASSERTE(m_hMapping != 0);
// Freeing the mapping object doesn't do any good if you still have the file.
_ASSERTE(m_hFile == INVALID_HANDLE_VALUE);
// Unmap the memory we allocated before freeing the handle. But keep the
// memory address intact.
if (m_pData)
VERIFY(UnmapViewOfFile(m_pData));
// Free the handle.
if (m_hMapping != 0)
{
VERIFY(CloseHandle(m_hMapping));
m_hMapping = 0;
}
return S_OK;
}
//*****************************************************************************
// Resets the logical base address and size to the value given. This is for
// cases like finding a section embedded in another format, like the .clb inside
// of an image. GetPtrForMem, Read, and Seek will then behave as though only
// data from pbStart to cbSize is valid.
//*****************************************************************************
HRESULT StgIO::SetBaseRange( // Return code.
void *pbStart, // Start of file data.
ULONG cbSize) // How big is the range.
{
if (m_iType == STGIO_SHAREDMEM)
{
// The base range must be inside of the current range.
_ASSERTE((m_pBaseData != NULL) && (m_cbData != 0));
_ASSERTE(((LONG_PTR) pbStart >= (LONG_PTR) m_pBaseData));
_ASSERTE(((LONG_PTR) pbStart + cbSize <= (LONG_PTR) m_pBaseData + m_cbData));
}
// Save the base range per user request.
m_pData = pbStart;
m_cbData = cbSize;
return S_OK;
}
//*****************************************************************************
// Caller wants a pointer to a chunk of the file. This function will make sure
// that the memory for that chunk has been committed and will load from the
// file if required. This algorithm attempts to load no more data from disk
// than is necessary. It walks the required pages from lowest to highest,
// and for each block of unloaded pages, the memory is committed and the data
// is read from disk. If all pages are unloaded, all of them are loaded at
// once to speed throughput from disk.
//*****************************************************************************
HRESULT StgIO::GetPtrForMem( // Return code.
ULONG cbStart, // Where to start getting memory.
ULONG cbSize, // How much data.
void *&ptr) // Return pointer to memory here.
{
int iFirst, iLast; // First and last page required.
ULONG iOffset, iSize; // For committing ranges of memory.
int i, j; // Loop control.
HRESULT hr;
// We need either memory (mmf or user supplied) or a backing store to
// return a pointer. Call Read if you don't have these.
if (!IsBackingStore() && m_pData == 0)
return (PostError(BadError(E_UNEXPECTED)));
// Validate the caller isn't asking for a data value out of range.
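// ClrSafeInt::addition returns false on unsigned overflow, so cbStart + cbSize is range-checked without wrapping.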
if (!(ClrSafeInt<ULONG>::addition(cbStart, cbSize, iOffset)
&& (iOffset <= m_cbData)))
return (PostError(E_INVALIDARG));
// This code will check for pages that need to be paged from disk in
// order for us to return a pointer to that memory.
if (IsBackingStore())
{
// Backing store is bogus when in rewrite mode.
if (m_bRewrite)
return (PostError(BadError(E_UNEXPECTED)));
// Must have the page map to continue.
_ASSERTE(m_rgPageMap && m_iPageSize && m_pData);
// Figure out the first and last page that are required for commit.
iFirst = cbStart / m_iPageSize;
iLast = (cbStart + cbSize - 1) / m_iPageSize;
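// For example, with 4 KB pages a request with cbStart=5000 and cbSize=3000 touches only page 1 (iFirst == iLast == 1).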
// Avoid confusion.
ptr = 0;
// Do a smart load of every page required. Do not reload pages that have
// already been brought in from disk.
//<REVISIT_TODO>@FUTURE: add an optimization so that when all pages have been faulted, we no
// longer do a page-by-page search.</REVISIT_TODO>
for (i=iFirst; i<=iLast; )
{
// Find the first page that hasn't already been loaded.
while (i<=iLast && GetBit(m_rgPageMap, i))
++i;
if (i > iLast)
break;
// Offset for first thing to load.
iOffset = i * m_iPageSize;
iSize = 0;
// See how many in a row have not been loaded.
for (j=i; i<=iLast && !GetBit(m_rgPageMap, i); i++)
{
// Safe: iSize += m_iPageSize;
if (!(ClrSafeInt<ULONG>::addition(iSize, m_iPageSize, iSize)))
{
return PostError(E_INVALIDARG);
}
}
// First commit the memory for this part of the file.
if (::ClrVirtualAlloc((void *) ((DWORD_PTR) m_pData + iOffset),
iSize, MEM_COMMIT, PAGE_READWRITE) == 0)
return (PostError(OutOfMemory()));
// Now load that portion of the file from disk.
if (FAILED(hr = Seek(iOffset, FILE_BEGIN)) ||
FAILED(hr = ReadFromDisk((void *) ((DWORD_PTR) m_pData + iOffset), iSize, 0)))
{
return (hr);
}
// Change the memory to read only to avoid any modifications. Any faults
// that occur indicate a bug whereby the engine is trying to write to
// protected memory.
_ASSERTE(::ClrVirtualAlloc((void *) ((DWORD_PTR) m_pData + iOffset),
iSize, MEM_COMMIT, PAGE_READONLY) != 0);
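// Note: _ASSERTE typically compiles away in retail builds, so this read-only re-protection is effectively a debug-only guard.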
// Record each new loaded page.
for (; j<i; j++)
SetBit(m_rgPageMap, j, true);
}
// Everything was brought into memory, so now return pointer to caller.
ptr = (void *) ((DWORD_PTR) m_pData + cbStart);
}
// Memory version or memory mapped file work the same way.
else if (IsMemoryMapped() ||
(m_iType == STGIO_MEM) ||
(m_iType == STGIO_SHAREDMEM) ||
(m_iType == STGIO_HFILEMEM))
{
if (!(cbStart <= m_cbData))
return (PostError(E_INVALIDARG));
ptr = (void *) ((DWORD_PTR) m_pData + cbStart);
}
// What's left?! Add some defense.
else
{
_ASSERTE(0);
ptr = 0;
return (PostError(BadError(E_UNEXPECTED)));
}
return (S_OK);
}
//*****************************************************************************
// For cached writes, flush the cache to the data store.
//*****************************************************************************
HRESULT StgIO::FlushCache()
{
ULONG cbWritten;
HRESULT hr;
if (m_cbBuff)
{
if (FAILED(hr = WriteToDisk(m_rgBuff, m_cbBuff, &cbWritten)))
return (hr);
m_cbBuff = 0;
}
return (S_OK);
}
//*****************************************************************************
// Tells the file system to flush any cached data it may have. This is
// expensive, but if successful guarantees you won't lose writes short of
// a disk failure.
//*****************************************************************************
HRESULT StgIO::FlushFileBuffers()
{
_ASSERTE(!IsReadOnly());
if (m_hFile != INVALID_HANDLE_VALUE)
{
if (::FlushFileBuffers(m_hFile))
return (S_OK);
else
return (MapFileError(GetLastError()));
}
return (S_OK);
}
//*****************************************************************************
// Called after a successful rewrite of an existing file. The in memory
// backing store is no longer valid because all new data is in memory and
// on disk. This is essentially the same state as created, so free up some
// working set and remember this state.
//*****************************************************************************
HRESULT StgIO::ResetBackingStore() // Return code.
{
// Don't be calling this function for read only data.
_ASSERTE(!IsReadOnly());
// Free up any backing store data we no longer need now that everything
// is in memory.
FreePageMap();
return (S_OK);
}
//
// Private.
//
//*****************************************************************************
// This version will force the data in cache out to disk for real. The code
// can handle the different types of storage we might be sitting on based on
// the open type.
//*****************************************************************************
HRESULT StgIO::WriteToDisk( // Return code.
const void *pbBuff, // Buffer to write.
ULONG cbWrite, // How much.
ULONG *pcbWritten) // Return how much written.
{
ULONG cbWritten; // Buffer for write funcs.
HRESULT hr = S_OK;
// Pretty obvious.
_ASSERTE(!IsReadOnly());
// Always need a buffer to write this data to.
if (!pcbWritten)
pcbWritten = &cbWritten;
// Action taken depends on type of storage.
switch (m_iType)
{
case STGIO_HFILE:
case STGIO_HFILEMEM:
{
// Use the file system's move.
_ASSERTE(m_hFile != INVALID_HANDLE_VALUE);
// Do the write to disk.
if (!::WriteFile(m_hFile, pbBuff, cbWrite, pcbWritten, 0))
hr = MapFileError(GetLastError());
}
break;
// Write to the stream.
case STGIO_STREAM:
{
// Delegate write to stream code.
hr = m_pIStream->Write(pbBuff, cbWrite, pcbWritten);
}
break;
// We cannot write to fixed read/only memory or LoadLibrary module.
case STGIO_HMODULE:
case STGIO_MEM:
case STGIO_SHAREDMEM:
_ASSERTE(0);
hr = BadError(E_UNEXPECTED);
break;
// Weird to write with no data.
case STGIO_NODATA:
default:
_ASSERTE(0);
break;
}
return (hr);
}
//*****************************************************************************
// This version only reads from disk.
//*****************************************************************************
HRESULT StgIO::ReadFromDisk( // Return code.
void *pbBuff, // Write buffer here.
ULONG cbBuff, // How much to read.
ULONG *pcbRead) // How much read.
{
ULONG cbRead;
_ASSERTE(m_iType == STGIO_HFILE || m_iType == STGIO_STREAM);
// Need to have a buffer.
if (!pcbRead)
pcbRead = &cbRead;
// Read only from file to avoid recursive logic.
if (m_iType == STGIO_HFILE || m_iType == STGIO_HFILEMEM)
{
if (::ReadFile(m_hFile, pbBuff, cbBuff, pcbRead, 0))
return (S_OK);
return (MapFileError(GetLastError()));
}
// Read directly from stream.
else
{
return (m_pIStream->Read(pbBuff, cbBuff, pcbRead));
}
}
//*****************************************************************************
// Copy the contents of the file for this storage to the target path.
//*****************************************************************************
HRESULT StgIO::CopyFileInternal( // Return code.
LPCWSTR szTo, // Target save path for file.
int bFailIfThere, // true to fail if target exists.
int bWriteThrough) // Should copy be written through OS cache.
{
DWORD iCurrent; // Save original location.
DWORD cbRead; // Byte count for buffer.
DWORD cbWrite; // Check write of bytes.
const DWORD cbBuff = 4096; // Size of buffer for copy (in bytes).
BYTE *pBuff = (BYTE*)alloca(cbBuff); // Buffer for copy.
HANDLE hFile; // Target file.
HRESULT hr = S_OK;
// Create target file.
if ((hFile = ::WszCreateFile(szTo, GENERIC_WRITE, 0, 0,
(bFailIfThere) ? CREATE_NEW : CREATE_ALWAYS,
(bWriteThrough) ? FILE_FLAG_WRITE_THROUGH : 0,
0)) == INVALID_HANDLE_VALUE)
{
return (MapFileError(GetLastError()));
}
// Save current location and reset it later.
iCurrent = ::SetFilePointer(m_hFile, 0, 0, FILE_CURRENT);
::SetFilePointer(m_hFile, 0, 0, FILE_BEGIN);
// Copy while there are bytes.
while (::ReadFile(m_hFile, pBuff, cbBuff, &cbRead, 0) && cbRead)
{
if (!::WriteFile(hFile, pBuff, cbRead, &cbWrite, 0) || cbWrite != cbRead)
{
hr = STG_E_WRITEFAULT;
break;
}
}
// Reset file offset.
::SetFilePointer(m_hFile, iCurrent, 0, FILE_BEGIN);
// Close target.
if (!bWriteThrough)
VERIFY(::FlushFileBuffers(hFile));
::CloseHandle(hFile);
return (hr);
}
//*****************************************************************************
// Free the data used for backing store from disk in read/write scenario.
//*****************************************************************************
void StgIO::FreePageMap()
{
// If a small file was allocated, then free that memory.
if (m_bFreeMem && m_pBaseData)
FreeMemory(m_pBaseData);
// For mmf, close handles and free resources.
else if (m_hMapping && m_pBaseData)
{
VERIFY(UnmapViewOfFile(m_pBaseData));
VERIFY(CloseHandle(m_hMapping));
}
// For our own system, free memory.
else if (m_rgPageMap && m_pBaseData)
{
delete [] m_rgPageMap;
m_rgPageMap = 0;
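// Note: MEM_RELEASE requires a size of zero, so the committed range is decommitted first and the reservation is then released separately.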
VERIFY(::ClrVirtualFree(m_pBaseData, (((m_cbData - 1) & ~(m_iPageSize - 1)) + m_iPageSize), MEM_DECOMMIT));
VERIFY(::ClrVirtualFree(m_pBaseData, 0, MEM_RELEASE));
m_pBaseData = 0;
m_cbData = 0;
}
m_pBaseData = 0;
m_hMapping = 0;
m_cbData = 0;
}
//*****************************************************************************
// Check the given pointer and ensure it is correctly aligned. Return true
// if it is aligned, false if it is not.
//*****************************************************************************
int StgIO::IsAlignedPtr(ULONG_PTR Value, int iAlignment)
{
HRESULT hr;
void *ptrStart = NULL;
if ((m_iType == STGIO_STREAM) ||
(m_iType == STGIO_SHAREDMEM) ||
(m_iType == STGIO_MEM))
{
return ((Value - (ULONG_PTR) m_pData) % iAlignment == 0);
}
else
{
hr = GetPtrForMem(0, 1, ptrStart);
_ASSERTE(hr == S_OK && "GetPtrForMem failed");
_ASSERTE(Value > (ULONG_PTR) ptrStart);
return (((Value - (ULONG_PTR) ptrStart) % iAlignment) == 0);
}
} // int StgIO::IsAlignedPtr()
//*****************************************************************************
// These helper functions are used to allocate fairly large pieces of memory,
// more than should be taken from the runtime heap, but less than would require
// virtual memory overhead.
//*****************************************************************************
// #define _TRACE_MEM_ 1
void *AllocateMemory(int iSize)
{
void * ptr;
ptr = new (nothrow) BYTE[iSize];
#if defined(_DEBUG) && defined(_TRACE_MEM_)
static int i=0;
DbgWriteEx(W("AllocateMemory: (%d) 0x%08x, size %d\n"), ++i, ptr, iSize);
#endif
return (ptr);
}
void FreeMemory(void *pbData)
{
#if defined(_DEBUG) && defined(_TRACE_MEM_)
static int i=0;
DbgWriteEx(W("FreeMemory: (%d) 0x%08x\n"), ++i, pbData);
#endif
_ASSERTE(pbData);
delete [] (BYTE *) pbData;
}
| -1 |
dotnet/runtime | 66,248 | Fix issues related to JsonSerializerOptions mutation and caching. | Second attempt at merging https://github.com/dotnet/runtime/pull/65863. Original PR introduced test failures in netfx and was reverted in https://github.com/dotnet/runtime/pull/66235.
cc @jkotas | eiriktsarpalis | 2022-03-05T19:59:48Z | 2022-03-07T19:44:14Z | 44ec3c9474b3a93eb4f71791a43d8bb5c0b08a58 | 8b4eaf94140f488743d0c78caa3afec3b9c5d789 | Fix issues related to JsonSerializerOptions mutation and caching.. Second attempt at merging https://github.com/dotnet/runtime/pull/65863. Original PR introduced test failures in netfx and was reverted in https://github.com/dotnet/runtime/pull/66235.
cc @jkotas | ./src/tests/CoreMangLib/system/delegate/generics/negativegenerics.cs | // Licensed to the .NET Foundation under one or more agreements.
// The .NET Foundation licenses this file to you under the MIT license.
//Covers various negative binding cases for delegates and generics...
using System;
//Define some classes and methods for use in our scenarios...
class A<T>{
public virtual void GMeth<U>(){}
}
class B<T> : A<int>{
public override void GMeth<U>(){}
}
//Define our delegate types...
delegate void Closed();
delegate void Open(B<int> b);
delegate void GClosed<T>();
class Test_negativegenerics{
public static int retVal=100;
public static int Main(){
//Try to create an open-instance delegate to a virtual generic method (@TODO - Need early bound case here too)
//Try to create a generic delegate of a non-instantiated type
//Try to create a delegate over a non-instantiated target type
//Try to create a delegate over a non-instantiated target method
//Try to create a delegate to a generic method by name
//Does Closed() over GMeth<int> == Closed() over GMeth<double>??
//Does GClosed<int>() over GMeth<int> == GClosed<double>() over GMeth<int>??
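//Illustrative sketch (not part of the original test): one negative case above, binding to the
//open generic method definition, would look roughly like this and is expected to fail:
//  Delegate.CreateDelegate(typeof(Closed), new B<int>(), typeof(B<int>).GetMethod("GMeth"));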
Console.WriteLine("Done - {0}",retVal==100?"Passed":"Failed");
return retVal;
}
}
| // Licensed to the .NET Foundation under one or more agreements.
// The .NET Foundation licenses this file to you under the MIT license.
//Covers various negative binding cases for delegates and generics...
using System;
//Define some classes and methods for use in our scenarios...
class A<T>{
public virtual void GMeth<U>(){}
}
class B<T> : A<int>{
public override void GMeth<U>(){}
}
//Define our delegate types...
delegate void Closed();
delegate void Open(B<int> b);
delegate void GClosed<T>();
class Test_negativegenerics{
public static int retVal=100;
public static int Main(){
//Try to create an open-instance delegate to a virtual generic method (@TODO - Need early bound case here too)
//Try to create a generic delegate of a non-instantiated type
//Try to create a delegate over a non-instantiated target type
//Try to create a delegate over a non-instantiated target method
//Try to create a delegate to a generic method by name
//Does Closed() over GMeth<int> == Closed() over GMeth<double>??
//Does GClosed<int>() over GMeth<int> == GClosed<double>() over GMeth<int>??
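//Illustrative sketch (not part of the original test): one negative case above, binding to the
//open generic method definition, would look roughly like this and is expected to fail:
//  Delegate.CreateDelegate(typeof(Closed), new B<int>(), typeof(B<int>).GetMethod("GMeth"));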
Console.WriteLine("Done - {0}",retVal==100?"Passed":"Failed");
return retVal;
}
}
| -1 |
dotnet/runtime | 66,248 | Fix issues related to JsonSerializerOptions mutation and caching. | Second attempt at merging https://github.com/dotnet/runtime/pull/65863. Original PR introduced test failures in netfx and was reverted in https://github.com/dotnet/runtime/pull/66235.
cc @jkotas | eiriktsarpalis | 2022-03-05T19:59:48Z | 2022-03-07T19:44:14Z | 44ec3c9474b3a93eb4f71791a43d8bb5c0b08a58 | 8b4eaf94140f488743d0c78caa3afec3b9c5d789 | Fix issues related to JsonSerializerOptions mutation and caching.. Second attempt at merging https://github.com/dotnet/runtime/pull/65863. Original PR introduced test failures in netfx and was reverted in https://github.com/dotnet/runtime/pull/66235.
cc @jkotas | ./src/tests/JIT/Regression/CLR-x86-JIT/V1-M09/b15307/b15307.cs | // Licensed to the .NET Foundation under one or more agreements.
// The .NET Foundation licenses this file to you under the MIT license.
using System;
using System.Globalization;
namespace DefaultNamespace
{
internal class bug
{
public static int Main(String[] args)
{
CultureInfo ci = new CultureInfo("en-us");
return 100;
}
}
}
| // Licensed to the .NET Foundation under one or more agreements.
// The .NET Foundation licenses this file to you under the MIT license.
using System;
using System.Globalization;
namespace DefaultNamespace
{
internal class bug
{
public static int Main(String[] args)
{
CultureInfo ci = new CultureInfo("en-us");
return 100;
}
}
}
| -1 |
dotnet/runtime | 66,248 | Fix issues related to JsonSerializerOptions mutation and caching. | Second attempt at merging https://github.com/dotnet/runtime/pull/65863. Original PR introduced test failures in netfx and was reverted in https://github.com/dotnet/runtime/pull/66235.
cc @jkotas | eiriktsarpalis | 2022-03-05T19:59:48Z | 2022-03-07T19:44:14Z | 44ec3c9474b3a93eb4f71791a43d8bb5c0b08a58 | 8b4eaf94140f488743d0c78caa3afec3b9c5d789 | Fix issues related to JsonSerializerOptions mutation and caching.. Second attempt at merging https://github.com/dotnet/runtime/pull/65863. Original PR introduced test failures in netfx and was reverted in https://github.com/dotnet/runtime/pull/66235.
cc @jkotas | ./src/coreclr/vm/codeman.inl | // Licensed to the .NET Foundation under one or more agreements.
// The .NET Foundation licenses this file to you under the MIT license.
inline BOOL ExecutionManager::IsCollectibleMethod(const METHODTOKEN& MethodToken)
{
WRAPPER_NO_CONTRACT;
return MethodToken.m_pRangeSection->flags & RangeSection::RANGE_SECTION_COLLECTIBLE;
}
inline TADDR IJitManager::JitTokenToModuleBase(const METHODTOKEN& MethodToken)
{
return MethodToken.m_pRangeSection->LowAddress;
}
| // Licensed to the .NET Foundation under one or more agreements.
// The .NET Foundation licenses this file to you under the MIT license.
inline BOOL ExecutionManager::IsCollectibleMethod(const METHODTOKEN& MethodToken)
{
WRAPPER_NO_CONTRACT;
return MethodToken.m_pRangeSection->flags & RangeSection::RANGE_SECTION_COLLECTIBLE;
}
inline TADDR IJitManager::JitTokenToModuleBase(const METHODTOKEN& MethodToken)
{
return MethodToken.m_pRangeSection->LowAddress;
}
| -1 |
dotnet/runtime | 66,248 | Fix issues related to JsonSerializerOptions mutation and caching. | Second attempt at merging https://github.com/dotnet/runtime/pull/65863. Original PR introduced test failures in netfx and was reverted in https://github.com/dotnet/runtime/pull/66235.
cc @jkotas | eiriktsarpalis | 2022-03-05T19:59:48Z | 2022-03-07T19:44:14Z | 44ec3c9474b3a93eb4f71791a43d8bb5c0b08a58 | 8b4eaf94140f488743d0c78caa3afec3b9c5d789 | Fix issues related to JsonSerializerOptions mutation and caching.. Second attempt at merging https://github.com/dotnet/runtime/pull/65863. Original PR introduced test failures in netfx and was reverted in https://github.com/dotnet/runtime/pull/66235.
cc @jkotas | ./src/tests/JIT/HardwareIntrinsics/Arm/AdvSimd/ShiftRightLogicalNarrowingUpper.Vector128.Int32.1.cs | // Licensed to the .NET Foundation under one or more agreements.
// The .NET Foundation licenses this file to you under the MIT license.
/******************************************************************************
* This file is auto-generated from a template file by the GenerateTests.csx *
* script in tests\src\JIT\HardwareIntrinsics\X86\Shared. In order to make *
* changes, please update the corresponding template and run according to the *
* directions listed in the file. *
******************************************************************************/
using System;
using System.Runtime.CompilerServices;
using System.Runtime.InteropServices;
using System.Runtime.Intrinsics;
using System.Runtime.Intrinsics.Arm;
namespace JIT.HardwareIntrinsics.Arm
{
public static partial class Program
{
private static void ShiftRightLogicalNarrowingUpper_Vector128_Int32_1()
{
var test = new ImmBinaryOpTest__ShiftRightLogicalNarrowingUpper_Vector128_Int32_1();
if (test.IsSupported)
{
// Validates basic functionality works, using Unsafe.Read
test.RunBasicScenario_UnsafeRead();
if (AdvSimd.IsSupported)
{
// Validates basic functionality works, using Load
test.RunBasicScenario_Load();
}
// Validates calling via reflection works, using Unsafe.Read
test.RunReflectionScenario_UnsafeRead();
if (AdvSimd.IsSupported)
{
// Validates calling via reflection works, using Load
test.RunReflectionScenario_Load();
}
// Validates passing a static member works
test.RunClsVarScenario();
if (AdvSimd.IsSupported)
{
// Validates passing a static member works, using pinning and Load
test.RunClsVarScenario_Load();
}
// Validates passing a local works, using Unsafe.Read
test.RunLclVarScenario_UnsafeRead();
if (AdvSimd.IsSupported)
{
// Validates passing a local works, using Load
test.RunLclVarScenario_Load();
}
// Validates passing the field of a local class works
test.RunClassLclFldScenario();
if (AdvSimd.IsSupported)
{
// Validates passing the field of a local class works, using pinning and Load
test.RunClassLclFldScenario_Load();
}
// Validates passing an instance member of a class works
test.RunClassFldScenario();
if (AdvSimd.IsSupported)
{
// Validates passing an instance member of a class works, using pinning and Load
test.RunClassFldScenario_Load();
}
// Validates passing the field of a local struct works
test.RunStructLclFldScenario();
if (AdvSimd.IsSupported)
{
// Validates passing the field of a local struct works, using pinning and Load
test.RunStructLclFldScenario_Load();
}
// Validates passing an instance member of a struct works
test.RunStructFldScenario();
if (AdvSimd.IsSupported)
{
// Validates passing an instance member of a struct works, using pinning and Load
test.RunStructFldScenario_Load();
}
}
else
{
// Validates we throw on unsupported hardware
test.RunUnsupportedScenario();
}
if (!test.Succeeded)
{
throw new Exception("One or more scenarios did not complete as expected.");
}
}
}
public sealed unsafe class ImmBinaryOpTest__ShiftRightLogicalNarrowingUpper_Vector128_Int32_1
{
private struct DataTable
{
private byte[] inArray1;
private byte[] inArray2;
private byte[] outArray;
private GCHandle inHandle1;
private GCHandle inHandle2;
private GCHandle outHandle;
private ulong alignment;
public DataTable(Int32[] inArray1, Int64[] inArray2, Int32[] outArray, int alignment)
{
int sizeOfinArray1 = inArray1.Length * Unsafe.SizeOf<Int32>();
int sizeOfinArray2 = inArray2.Length * Unsafe.SizeOf<Int64>();
int sizeOfoutArray = outArray.Length * Unsafe.SizeOf<Int32>();
if ((alignment != 16 && alignment != 8) || (alignment * 2) < sizeOfinArray1 || (alignment * 2) < sizeOfinArray2 || (alignment * 2) < sizeOfoutArray)
{
throw new ArgumentException("Invalid value of alignment");
}
this.inArray1 = new byte[alignment * 2];
this.inArray2 = new byte[alignment * 2];
this.outArray = new byte[alignment * 2];
this.inHandle1 = GCHandle.Alloc(this.inArray1, GCHandleType.Pinned);
this.inHandle2 = GCHandle.Alloc(this.inArray2, GCHandleType.Pinned);
this.outHandle = GCHandle.Alloc(this.outArray, GCHandleType.Pinned);
this.alignment = (ulong)alignment;
Unsafe.CopyBlockUnaligned(ref Unsafe.AsRef<byte>(inArray1Ptr), ref Unsafe.As<Int32, byte>(ref inArray1[0]), (uint)sizeOfinArray1);
Unsafe.CopyBlockUnaligned(ref Unsafe.AsRef<byte>(inArray2Ptr), ref Unsafe.As<Int64, byte>(ref inArray2[0]), (uint)sizeOfinArray2);
}
public void* inArray1Ptr => Align((byte*)(inHandle1.AddrOfPinnedObject().ToPointer()), alignment);
public void* inArray2Ptr => Align((byte*)(inHandle2.AddrOfPinnedObject().ToPointer()), alignment);
public void* outArrayPtr => Align((byte*)(outHandle.AddrOfPinnedObject().ToPointer()), alignment);
public void Dispose()
{
inHandle1.Free();
inHandle2.Free();
outHandle.Free();
}
private static unsafe void* Align(byte* buffer, ulong expectedAlignment)
{
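                // Rounds the pinned buffer pointer up to the next multiple of expectedAlignment (assumed to be a power of two).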
return (void*)(((ulong)buffer + expectedAlignment - 1) & ~(expectedAlignment - 1));
}
}
private struct TestStruct
{
public Vector64<Int32> _fld1;
public Vector128<Int64> _fld2;
public static TestStruct Create()
{
var testStruct = new TestStruct();
for (var i = 0; i < Op1ElementCount; i++) { _data1[i] = TestLibrary.Generator.GetInt32(); }
Unsafe.CopyBlockUnaligned(ref Unsafe.As<Vector64<Int32>, byte>(ref testStruct._fld1), ref Unsafe.As<Int32, byte>(ref _data1[0]), (uint)Unsafe.SizeOf<Vector64<Int32>>());
for (var i = 0; i < Op2ElementCount; i++) { _data2[i] = TestLibrary.Generator.GetInt64(); }
Unsafe.CopyBlockUnaligned(ref Unsafe.As<Vector128<Int64>, byte>(ref testStruct._fld2), ref Unsafe.As<Int64, byte>(ref _data2[0]), (uint)Unsafe.SizeOf<Vector128<Int64>>());
return testStruct;
}
public void RunStructFldScenario(ImmBinaryOpTest__ShiftRightLogicalNarrowingUpper_Vector128_Int32_1 testClass)
{
var result = AdvSimd.ShiftRightLogicalNarrowingUpper(_fld1, _fld2, 1);
Unsafe.Write(testClass._dataTable.outArrayPtr, result);
testClass.ValidateResult(_fld1, _fld2, testClass._dataTable.outArrayPtr);
}
public void RunStructFldScenario_Load(ImmBinaryOpTest__ShiftRightLogicalNarrowingUpper_Vector128_Int32_1 testClass)
{
fixed (Vector64<Int32>* pFld1 = &_fld1)
fixed (Vector128<Int64>* pFld2 = &_fld2)
{
var result = AdvSimd.ShiftRightLogicalNarrowingUpper(
AdvSimd.LoadVector64((Int32*)(pFld1)),
AdvSimd.LoadVector128((Int64*)(pFld2)),
1
);
Unsafe.Write(testClass._dataTable.outArrayPtr, result);
testClass.ValidateResult(_fld1, _fld2, testClass._dataTable.outArrayPtr);
}
}
}
private static readonly int LargestVectorSize = 16;
private static readonly int Op1ElementCount = Unsafe.SizeOf<Vector64<Int32>>() / sizeof(Int32);
private static readonly int Op2ElementCount = Unsafe.SizeOf<Vector128<Int64>>() / sizeof(Int64);
private static readonly int RetElementCount = Unsafe.SizeOf<Vector128<Int32>>() / sizeof(Int32);
private static readonly byte Imm = 1;
private static Int32[] _data1 = new Int32[Op1ElementCount];
private static Int64[] _data2 = new Int64[Op2ElementCount];
private static Vector64<Int32> _clsVar1;
private static Vector128<Int64> _clsVar2;
private Vector64<Int32> _fld1;
private Vector128<Int64> _fld2;
private DataTable _dataTable;
static ImmBinaryOpTest__ShiftRightLogicalNarrowingUpper_Vector128_Int32_1()
{
for (var i = 0; i < Op1ElementCount; i++) { _data1[i] = TestLibrary.Generator.GetInt32(); }
Unsafe.CopyBlockUnaligned(ref Unsafe.As<Vector64<Int32>, byte>(ref _clsVar1), ref Unsafe.As<Int32, byte>(ref _data1[0]), (uint)Unsafe.SizeOf<Vector64<Int32>>());
for (var i = 0; i < Op2ElementCount; i++) { _data2[i] = TestLibrary.Generator.GetInt64(); }
Unsafe.CopyBlockUnaligned(ref Unsafe.As<Vector128<Int64>, byte>(ref _clsVar2), ref Unsafe.As<Int64, byte>(ref _data2[0]), (uint)Unsafe.SizeOf<Vector128<Int64>>());
}
public ImmBinaryOpTest__ShiftRightLogicalNarrowingUpper_Vector128_Int32_1()
{
Succeeded = true;
for (var i = 0; i < Op1ElementCount; i++) { _data1[i] = TestLibrary.Generator.GetInt32(); }
Unsafe.CopyBlockUnaligned(ref Unsafe.As<Vector64<Int32>, byte>(ref _fld1), ref Unsafe.As<Int32, byte>(ref _data1[0]), (uint)Unsafe.SizeOf<Vector64<Int32>>());
for (var i = 0; i < Op2ElementCount; i++) { _data2[i] = TestLibrary.Generator.GetInt64(); }
Unsafe.CopyBlockUnaligned(ref Unsafe.As<Vector128<Int64>, byte>(ref _fld2), ref Unsafe.As<Int64, byte>(ref _data2[0]), (uint)Unsafe.SizeOf<Vector128<Int64>>());
for (var i = 0; i < Op1ElementCount; i++) { _data1[i] = TestLibrary.Generator.GetInt32(); }
for (var i = 0; i < Op2ElementCount; i++) { _data2[i] = TestLibrary.Generator.GetInt64(); }
_dataTable = new DataTable(_data1, _data2, new Int32[RetElementCount], LargestVectorSize);
}
public bool IsSupported => AdvSimd.IsSupported;
public bool Succeeded { get; set; }
public void RunBasicScenario_UnsafeRead()
{
TestLibrary.TestFramework.BeginScenario(nameof(RunBasicScenario_UnsafeRead));
var result = AdvSimd.ShiftRightLogicalNarrowingUpper(
Unsafe.Read<Vector64<Int32>>(_dataTable.inArray1Ptr),
Unsafe.Read<Vector128<Int64>>(_dataTable.inArray2Ptr),
1
);
Unsafe.Write(_dataTable.outArrayPtr, result);
ValidateResult(_dataTable.inArray1Ptr, _dataTable.inArray2Ptr, _dataTable.outArrayPtr);
}
public void RunBasicScenario_Load()
{
TestLibrary.TestFramework.BeginScenario(nameof(RunBasicScenario_Load));
var result = AdvSimd.ShiftRightLogicalNarrowingUpper(
AdvSimd.LoadVector64((Int32*)(_dataTable.inArray1Ptr)),
AdvSimd.LoadVector128((Int64*)(_dataTable.inArray2Ptr)),
1
);
Unsafe.Write(_dataTable.outArrayPtr, result);
ValidateResult(_dataTable.inArray1Ptr, _dataTable.inArray2Ptr, _dataTable.outArrayPtr);
}
public void RunReflectionScenario_UnsafeRead()
{
TestLibrary.TestFramework.BeginScenario(nameof(RunReflectionScenario_UnsafeRead));
var result = typeof(AdvSimd).GetMethod(nameof(AdvSimd.ShiftRightLogicalNarrowingUpper), new Type[] { typeof(Vector64<Int32>), typeof(Vector128<Int64>), typeof(byte) })
.Invoke(null, new object[] {
Unsafe.Read<Vector64<Int32>>(_dataTable.inArray1Ptr),
Unsafe.Read<Vector128<Int64>>(_dataTable.inArray2Ptr),
(byte)1
});
Unsafe.Write(_dataTable.outArrayPtr, (Vector128<Int32>)(result));
ValidateResult(_dataTable.inArray1Ptr, _dataTable.inArray2Ptr, _dataTable.outArrayPtr);
}
public void RunReflectionScenario_Load()
{
TestLibrary.TestFramework.BeginScenario(nameof(RunReflectionScenario_Load));
var result = typeof(AdvSimd).GetMethod(nameof(AdvSimd.ShiftRightLogicalNarrowingUpper), new Type[] { typeof(Vector64<Int32>), typeof(Vector128<Int64>), typeof(byte) })
.Invoke(null, new object[] {
AdvSimd.LoadVector64((Int32*)(_dataTable.inArray1Ptr)),
AdvSimd.LoadVector128((Int64*)(_dataTable.inArray2Ptr)),
(byte)1
});
Unsafe.Write(_dataTable.outArrayPtr, (Vector128<Int32>)(result));
ValidateResult(_dataTable.inArray1Ptr, _dataTable.inArray2Ptr, _dataTable.outArrayPtr);
}
public void RunClsVarScenario()
{
TestLibrary.TestFramework.BeginScenario(nameof(RunClsVarScenario));
var result = AdvSimd.ShiftRightLogicalNarrowingUpper(
_clsVar1,
_clsVar2,
1
);
Unsafe.Write(_dataTable.outArrayPtr, result);
ValidateResult(_clsVar1, _clsVar2, _dataTable.outArrayPtr);
}
public void RunClsVarScenario_Load()
{
TestLibrary.TestFramework.BeginScenario(nameof(RunClsVarScenario_Load));
fixed (Vector64<Int32>* pClsVar1 = &_clsVar1)
fixed (Vector128<Int64>* pClsVar2 = &_clsVar2)
{
var result = AdvSimd.ShiftRightLogicalNarrowingUpper(
AdvSimd.LoadVector64((Int32*)(pClsVar1)),
AdvSimd.LoadVector128((Int64*)(pClsVar2)),
1
);
Unsafe.Write(_dataTable.outArrayPtr, result);
ValidateResult(_clsVar1, _clsVar2, _dataTable.outArrayPtr);
}
}
public void RunLclVarScenario_UnsafeRead()
{
TestLibrary.TestFramework.BeginScenario(nameof(RunLclVarScenario_UnsafeRead));
var op1 = Unsafe.Read<Vector64<Int32>>(_dataTable.inArray1Ptr);
var op2 = Unsafe.Read<Vector128<Int64>>(_dataTable.inArray2Ptr);
var result = AdvSimd.ShiftRightLogicalNarrowingUpper(op1, op2, 1);
Unsafe.Write(_dataTable.outArrayPtr, result);
ValidateResult(op1, op2, _dataTable.outArrayPtr);
}
public void RunLclVarScenario_Load()
{
TestLibrary.TestFramework.BeginScenario(nameof(RunLclVarScenario_Load));
var op1 = AdvSimd.LoadVector64((Int32*)(_dataTable.inArray1Ptr));
var op2 = AdvSimd.LoadVector128((Int64*)(_dataTable.inArray2Ptr));
var result = AdvSimd.ShiftRightLogicalNarrowingUpper(op1, op2, 1);
Unsafe.Write(_dataTable.outArrayPtr, result);
ValidateResult(op1, op2, _dataTable.outArrayPtr);
}
public void RunClassLclFldScenario()
{
TestLibrary.TestFramework.BeginScenario(nameof(RunClassLclFldScenario));
var test = new ImmBinaryOpTest__ShiftRightLogicalNarrowingUpper_Vector128_Int32_1();
var result = AdvSimd.ShiftRightLogicalNarrowingUpper(test._fld1, test._fld2, 1);
Unsafe.Write(_dataTable.outArrayPtr, result);
ValidateResult(test._fld1, test._fld2, _dataTable.outArrayPtr);
}
public void RunClassLclFldScenario_Load()
{
TestLibrary.TestFramework.BeginScenario(nameof(RunClassLclFldScenario_Load));
var test = new ImmBinaryOpTest__ShiftRightLogicalNarrowingUpper_Vector128_Int32_1();
fixed (Vector64<Int32>* pFld1 = &test._fld1)
fixed (Vector128<Int64>* pFld2 = &test._fld2)
{
var result = AdvSimd.ShiftRightLogicalNarrowingUpper(
AdvSimd.LoadVector64((Int32*)(pFld1)),
AdvSimd.LoadVector128((Int64*)(pFld2)),
1
);
Unsafe.Write(_dataTable.outArrayPtr, result);
ValidateResult(test._fld1, test._fld2, _dataTable.outArrayPtr);
}
}
public void RunClassFldScenario()
{
TestLibrary.TestFramework.BeginScenario(nameof(RunClassFldScenario));
var result = AdvSimd.ShiftRightLogicalNarrowingUpper(_fld1, _fld2, 1);
Unsafe.Write(_dataTable.outArrayPtr, result);
ValidateResult(_fld1, _fld2, _dataTable.outArrayPtr);
}
public void RunClassFldScenario_Load()
{
TestLibrary.TestFramework.BeginScenario(nameof(RunClassFldScenario_Load));
fixed (Vector64<Int32>* pFld1 = &_fld1)
fixed (Vector128<Int64>* pFld2 = &_fld2)
{
var result = AdvSimd.ShiftRightLogicalNarrowingUpper(
AdvSimd.LoadVector64((Int32*)(pFld1)),
AdvSimd.LoadVector128((Int64*)(pFld2)),
1
);
Unsafe.Write(_dataTable.outArrayPtr, result);
ValidateResult(_fld1, _fld2, _dataTable.outArrayPtr);
}
}
public void RunStructLclFldScenario()
{
TestLibrary.TestFramework.BeginScenario(nameof(RunStructLclFldScenario));
var test = TestStruct.Create();
var result = AdvSimd.ShiftRightLogicalNarrowingUpper(test._fld1, test._fld2, 1);
Unsafe.Write(_dataTable.outArrayPtr, result);
ValidateResult(test._fld1, test._fld2, _dataTable.outArrayPtr);
}
public void RunStructLclFldScenario_Load()
{
TestLibrary.TestFramework.BeginScenario(nameof(RunStructLclFldScenario_Load));
var test = TestStruct.Create();
var result = AdvSimd.ShiftRightLogicalNarrowingUpper(
AdvSimd.LoadVector64((Int32*)(&test._fld1)),
AdvSimd.LoadVector128((Int64*)(&test._fld2)),
1
);
Unsafe.Write(_dataTable.outArrayPtr, result);
ValidateResult(test._fld1, test._fld2, _dataTable.outArrayPtr);
}
public void RunStructFldScenario()
{
TestLibrary.TestFramework.BeginScenario(nameof(RunStructFldScenario));
var test = TestStruct.Create();
test.RunStructFldScenario(this);
}
public void RunStructFldScenario_Load()
{
TestLibrary.TestFramework.BeginScenario(nameof(RunStructFldScenario_Load));
var test = TestStruct.Create();
test.RunStructFldScenario_Load(this);
}
public void RunUnsupportedScenario()
{
TestLibrary.TestFramework.BeginScenario(nameof(RunUnsupportedScenario));
bool succeeded = false;
try
{
RunBasicScenario_UnsafeRead();
}
catch (PlatformNotSupportedException)
{
succeeded = true;
}
if (!succeeded)
{
Succeeded = false;
}
}
private void ValidateResult(Vector64<Int32> firstOp, Vector128<Int64> secondOp, void* result, [CallerMemberName] string method = "")
{
Int32[] inArray1 = new Int32[Op1ElementCount];
Int64[] inArray2 = new Int64[Op2ElementCount];
Int32[] outArray = new Int32[RetElementCount];
Unsafe.WriteUnaligned(ref Unsafe.As<Int32, byte>(ref inArray1[0]), firstOp);
Unsafe.WriteUnaligned(ref Unsafe.As<Int64, byte>(ref inArray2[0]), secondOp);
Unsafe.CopyBlockUnaligned(ref Unsafe.As<Int32, byte>(ref outArray[0]), ref Unsafe.AsRef<byte>(result), (uint)Unsafe.SizeOf<Vector128<Int32>>());
ValidateResult(inArray1, inArray2, outArray, method);
}
private void ValidateResult(void* firstOp, void* secondOp, void* result, [CallerMemberName] string method = "")
{
Int32[] inArray1 = new Int32[Op1ElementCount];
Int64[] inArray2 = new Int64[Op2ElementCount];
Int32[] outArray = new Int32[RetElementCount];
Unsafe.CopyBlockUnaligned(ref Unsafe.As<Int32, byte>(ref inArray1[0]), ref Unsafe.AsRef<byte>(firstOp), (uint)Unsafe.SizeOf<Vector64<Int32>>());
Unsafe.CopyBlockUnaligned(ref Unsafe.As<Int64, byte>(ref inArray2[0]), ref Unsafe.AsRef<byte>(secondOp), (uint)Unsafe.SizeOf<Vector128<Int64>>());
Unsafe.CopyBlockUnaligned(ref Unsafe.As<Int32, byte>(ref outArray[0]), ref Unsafe.AsRef<byte>(result), (uint)Unsafe.SizeOf<Vector128<Int32>>());
ValidateResult(inArray1, inArray2, outArray, method);
}
private void ValidateResult(Int32[] firstOp, Int64[] secondOp, Int32[] result, [CallerMemberName] string method = "")
{
bool succeeded = true;
for (var i = 0; i < RetElementCount; i++)
{
if (Helpers.ShiftRightLogicalNarrowingUpper(firstOp, secondOp, Imm, i) != result[i])
{
succeeded = false;
break;
}
}
if (!succeeded)
{
TestLibrary.TestFramework.LogInformation($"{nameof(AdvSimd)}.{nameof(AdvSimd.ShiftRightLogicalNarrowingUpper)}<Int32>(Vector64<Int32>, Vector128<Int64>, 1): {method} failed:");
TestLibrary.TestFramework.LogInformation($" firstOp: ({string.Join(", ", firstOp)})");
TestLibrary.TestFramework.LogInformation($" secondOp: ({string.Join(", ", secondOp)})");
TestLibrary.TestFramework.LogInformation($" result: ({string.Join(", ", result)})");
TestLibrary.TestFramework.LogInformation(string.Empty);
Succeeded = false;
}
}
}
}
| // Licensed to the .NET Foundation under one or more agreements.
// The .NET Foundation licenses this file to you under the MIT license.
/******************************************************************************
* This file is auto-generated from a template file by the GenerateTests.csx *
* script in tests\src\JIT\HardwareIntrinsics\X86\Shared. In order to make *
* changes, please update the corresponding template and run according to the *
* directions listed in the file. *
******************************************************************************/
using System;
using System.Runtime.CompilerServices;
using System.Runtime.InteropServices;
using System.Runtime.Intrinsics;
using System.Runtime.Intrinsics.Arm;
namespace JIT.HardwareIntrinsics.Arm
{
public static partial class Program
{
private static void ShiftRightLogicalNarrowingUpper_Vector128_Int32_1()
{
var test = new ImmBinaryOpTest__ShiftRightLogicalNarrowingUpper_Vector128_Int32_1();
if (test.IsSupported)
{
// Validates basic functionality works, using Unsafe.Read
test.RunBasicScenario_UnsafeRead();
if (AdvSimd.IsSupported)
{
// Validates basic functionality works, using Load
test.RunBasicScenario_Load();
}
// Validates calling via reflection works, using Unsafe.Read
test.RunReflectionScenario_UnsafeRead();
if (AdvSimd.IsSupported)
{
// Validates calling via reflection works, using Load
test.RunReflectionScenario_Load();
}
// Validates passing a static member works
test.RunClsVarScenario();
if (AdvSimd.IsSupported)
{
// Validates passing a static member works, using pinning and Load
test.RunClsVarScenario_Load();
}
// Validates passing a local works, using Unsafe.Read
test.RunLclVarScenario_UnsafeRead();
if (AdvSimd.IsSupported)
{
// Validates passing a local works, using Load
test.RunLclVarScenario_Load();
}
// Validates passing the field of a local class works
test.RunClassLclFldScenario();
if (AdvSimd.IsSupported)
{
// Validates passing the field of a local class works, using pinning and Load
test.RunClassLclFldScenario_Load();
}
// Validates passing an instance member of a class works
test.RunClassFldScenario();
if (AdvSimd.IsSupported)
{
// Validates passing an instance member of a class works, using pinning and Load
test.RunClassFldScenario_Load();
}
// Validates passing the field of a local struct works
test.RunStructLclFldScenario();
if (AdvSimd.IsSupported)
{
// Validates passing the field of a local struct works, using pinning and Load
test.RunStructLclFldScenario_Load();
}
// Validates passing an instance member of a struct works
test.RunStructFldScenario();
if (AdvSimd.IsSupported)
{
// Validates passing an instance member of a struct works, using pinning and Load
test.RunStructFldScenario_Load();
}
}
else
{
// Validates we throw on unsupported hardware
test.RunUnsupportedScenario();
}
if (!test.Succeeded)
{
throw new Exception("One or more scenarios did not complete as expected.");
}
}
}
public sealed unsafe class ImmBinaryOpTest__ShiftRightLogicalNarrowingUpper_Vector128_Int32_1
{
private struct DataTable
{
private byte[] inArray1;
private byte[] inArray2;
private byte[] outArray;
private GCHandle inHandle1;
private GCHandle inHandle2;
private GCHandle outHandle;
private ulong alignment;
public DataTable(Int32[] inArray1, Int64[] inArray2, Int32[] outArray, int alignment)
{
int sizeOfinArray1 = inArray1.Length * Unsafe.SizeOf<Int32>();
int sizeOfinArray2 = inArray2.Length * Unsafe.SizeOf<Int64>();
int sizeOfoutArray = outArray.Length * Unsafe.SizeOf<Int32>();
if ((alignment != 16 && alignment != 8) || (alignment * 2) < sizeOfinArray1 || (alignment * 2) < sizeOfinArray2 || (alignment * 2) < sizeOfoutArray)
{
throw new ArgumentException("Invalid value of alignment");
}
this.inArray1 = new byte[alignment * 2];
this.inArray2 = new byte[alignment * 2];
this.outArray = new byte[alignment * 2];
this.inHandle1 = GCHandle.Alloc(this.inArray1, GCHandleType.Pinned);
this.inHandle2 = GCHandle.Alloc(this.inArray2, GCHandleType.Pinned);
this.outHandle = GCHandle.Alloc(this.outArray, GCHandleType.Pinned);
this.alignment = (ulong)alignment;
Unsafe.CopyBlockUnaligned(ref Unsafe.AsRef<byte>(inArray1Ptr), ref Unsafe.As<Int32, byte>(ref inArray1[0]), (uint)sizeOfinArray1);
Unsafe.CopyBlockUnaligned(ref Unsafe.AsRef<byte>(inArray2Ptr), ref Unsafe.As<Int64, byte>(ref inArray2[0]), (uint)sizeOfinArray2);
}
public void* inArray1Ptr => Align((byte*)(inHandle1.AddrOfPinnedObject().ToPointer()), alignment);
public void* inArray2Ptr => Align((byte*)(inHandle2.AddrOfPinnedObject().ToPointer()), alignment);
public void* outArrayPtr => Align((byte*)(outHandle.AddrOfPinnedObject().ToPointer()), alignment);
public void Dispose()
{
inHandle1.Free();
inHandle2.Free();
outHandle.Free();
}
private static unsafe void* Align(byte* buffer, ulong expectedAlignment)
{
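                // Rounds the pinned buffer pointer up to the next multiple of expectedAlignment (assumed to be a power of two).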
return (void*)(((ulong)buffer + expectedAlignment - 1) & ~(expectedAlignment - 1));
}
}
private struct TestStruct
{
public Vector64<Int32> _fld1;
public Vector128<Int64> _fld2;
public static TestStruct Create()
{
var testStruct = new TestStruct();
for (var i = 0; i < Op1ElementCount; i++) { _data1[i] = TestLibrary.Generator.GetInt32(); }
Unsafe.CopyBlockUnaligned(ref Unsafe.As<Vector64<Int32>, byte>(ref testStruct._fld1), ref Unsafe.As<Int32, byte>(ref _data1[0]), (uint)Unsafe.SizeOf<Vector64<Int32>>());
for (var i = 0; i < Op2ElementCount; i++) { _data2[i] = TestLibrary.Generator.GetInt64(); }
Unsafe.CopyBlockUnaligned(ref Unsafe.As<Vector128<Int64>, byte>(ref testStruct._fld2), ref Unsafe.As<Int64, byte>(ref _data2[0]), (uint)Unsafe.SizeOf<Vector128<Int64>>());
return testStruct;
}
public void RunStructFldScenario(ImmBinaryOpTest__ShiftRightLogicalNarrowingUpper_Vector128_Int32_1 testClass)
{
var result = AdvSimd.ShiftRightLogicalNarrowingUpper(_fld1, _fld2, 1);
Unsafe.Write(testClass._dataTable.outArrayPtr, result);
testClass.ValidateResult(_fld1, _fld2, testClass._dataTable.outArrayPtr);
}
public void RunStructFldScenario_Load(ImmBinaryOpTest__ShiftRightLogicalNarrowingUpper_Vector128_Int32_1 testClass)
{
fixed (Vector64<Int32>* pFld1 = &_fld1)
fixed (Vector128<Int64>* pFld2 = &_fld2)
{
var result = AdvSimd.ShiftRightLogicalNarrowingUpper(
AdvSimd.LoadVector64((Int32*)(pFld1)),
AdvSimd.LoadVector128((Int64*)(pFld2)),
1
);
Unsafe.Write(testClass._dataTable.outArrayPtr, result);
testClass.ValidateResult(_fld1, _fld2, testClass._dataTable.outArrayPtr);
}
}
}
private static readonly int LargestVectorSize = 16;
private static readonly int Op1ElementCount = Unsafe.SizeOf<Vector64<Int32>>() / sizeof(Int32);
private static readonly int Op2ElementCount = Unsafe.SizeOf<Vector128<Int64>>() / sizeof(Int64);
private static readonly int RetElementCount = Unsafe.SizeOf<Vector128<Int32>>() / sizeof(Int32);
private static readonly byte Imm = 1;
private static Int32[] _data1 = new Int32[Op1ElementCount];
private static Int64[] _data2 = new Int64[Op2ElementCount];
private static Vector64<Int32> _clsVar1;
private static Vector128<Int64> _clsVar2;
private Vector64<Int32> _fld1;
private Vector128<Int64> _fld2;
private DataTable _dataTable;
static ImmBinaryOpTest__ShiftRightLogicalNarrowingUpper_Vector128_Int32_1()
{
for (var i = 0; i < Op1ElementCount; i++) { _data1[i] = TestLibrary.Generator.GetInt32(); }
Unsafe.CopyBlockUnaligned(ref Unsafe.As<Vector64<Int32>, byte>(ref _clsVar1), ref Unsafe.As<Int32, byte>(ref _data1[0]), (uint)Unsafe.SizeOf<Vector64<Int32>>());
for (var i = 0; i < Op2ElementCount; i++) { _data2[i] = TestLibrary.Generator.GetInt64(); }
Unsafe.CopyBlockUnaligned(ref Unsafe.As<Vector128<Int64>, byte>(ref _clsVar2), ref Unsafe.As<Int64, byte>(ref _data2[0]), (uint)Unsafe.SizeOf<Vector128<Int64>>());
}
public ImmBinaryOpTest__ShiftRightLogicalNarrowingUpper_Vector128_Int32_1()
{
Succeeded = true;
for (var i = 0; i < Op1ElementCount; i++) { _data1[i] = TestLibrary.Generator.GetInt32(); }
Unsafe.CopyBlockUnaligned(ref Unsafe.As<Vector64<Int32>, byte>(ref _fld1), ref Unsafe.As<Int32, byte>(ref _data1[0]), (uint)Unsafe.SizeOf<Vector64<Int32>>());
for (var i = 0; i < Op2ElementCount; i++) { _data2[i] = TestLibrary.Generator.GetInt64(); }
Unsafe.CopyBlockUnaligned(ref Unsafe.As<Vector128<Int64>, byte>(ref _fld2), ref Unsafe.As<Int64, byte>(ref _data2[0]), (uint)Unsafe.SizeOf<Vector128<Int64>>());
for (var i = 0; i < Op1ElementCount; i++) { _data1[i] = TestLibrary.Generator.GetInt32(); }
for (var i = 0; i < Op2ElementCount; i++) { _data2[i] = TestLibrary.Generator.GetInt64(); }
_dataTable = new DataTable(_data1, _data2, new Int32[RetElementCount], LargestVectorSize);
}
public bool IsSupported => AdvSimd.IsSupported;
public bool Succeeded { get; set; }
public void RunBasicScenario_UnsafeRead()
{
TestLibrary.TestFramework.BeginScenario(nameof(RunBasicScenario_UnsafeRead));
var result = AdvSimd.ShiftRightLogicalNarrowingUpper(
Unsafe.Read<Vector64<Int32>>(_dataTable.inArray1Ptr),
Unsafe.Read<Vector128<Int64>>(_dataTable.inArray2Ptr),
1
);
Unsafe.Write(_dataTable.outArrayPtr, result);
ValidateResult(_dataTable.inArray1Ptr, _dataTable.inArray2Ptr, _dataTable.outArrayPtr);
}
public void RunBasicScenario_Load()
{
TestLibrary.TestFramework.BeginScenario(nameof(RunBasicScenario_Load));
var result = AdvSimd.ShiftRightLogicalNarrowingUpper(
AdvSimd.LoadVector64((Int32*)(_dataTable.inArray1Ptr)),
AdvSimd.LoadVector128((Int64*)(_dataTable.inArray2Ptr)),
1
);
Unsafe.Write(_dataTable.outArrayPtr, result);
ValidateResult(_dataTable.inArray1Ptr, _dataTable.inArray2Ptr, _dataTable.outArrayPtr);
}
public void RunReflectionScenario_UnsafeRead()
{
TestLibrary.TestFramework.BeginScenario(nameof(RunReflectionScenario_UnsafeRead));
var result = typeof(AdvSimd).GetMethod(nameof(AdvSimd.ShiftRightLogicalNarrowingUpper), new Type[] { typeof(Vector64<Int32>), typeof(Vector128<Int64>), typeof(byte) })
.Invoke(null, new object[] {
Unsafe.Read<Vector64<Int32>>(_dataTable.inArray1Ptr),
Unsafe.Read<Vector128<Int64>>(_dataTable.inArray2Ptr),
(byte)1
});
Unsafe.Write(_dataTable.outArrayPtr, (Vector128<Int32>)(result));
ValidateResult(_dataTable.inArray1Ptr, _dataTable.inArray2Ptr, _dataTable.outArrayPtr);
}
public void RunReflectionScenario_Load()
{
TestLibrary.TestFramework.BeginScenario(nameof(RunReflectionScenario_Load));
var result = typeof(AdvSimd).GetMethod(nameof(AdvSimd.ShiftRightLogicalNarrowingUpper), new Type[] { typeof(Vector64<Int32>), typeof(Vector128<Int64>), typeof(byte) })
.Invoke(null, new object[] {
AdvSimd.LoadVector64((Int32*)(_dataTable.inArray1Ptr)),
AdvSimd.LoadVector128((Int64*)(_dataTable.inArray2Ptr)),
(byte)1
});
Unsafe.Write(_dataTable.outArrayPtr, (Vector128<Int32>)(result));
ValidateResult(_dataTable.inArray1Ptr, _dataTable.inArray2Ptr, _dataTable.outArrayPtr);
}
public void RunClsVarScenario()
{
TestLibrary.TestFramework.BeginScenario(nameof(RunClsVarScenario));
var result = AdvSimd.ShiftRightLogicalNarrowingUpper(
_clsVar1,
_clsVar2,
1
);
Unsafe.Write(_dataTable.outArrayPtr, result);
ValidateResult(_clsVar1, _clsVar2, _dataTable.outArrayPtr);
}
public void RunClsVarScenario_Load()
{
TestLibrary.TestFramework.BeginScenario(nameof(RunClsVarScenario_Load));
fixed (Vector64<Int32>* pClsVar1 = &_clsVar1)
fixed (Vector128<Int64>* pClsVar2 = &_clsVar2)
{
var result = AdvSimd.ShiftRightLogicalNarrowingUpper(
AdvSimd.LoadVector64((Int32*)(pClsVar1)),
AdvSimd.LoadVector128((Int64*)(pClsVar2)),
1
);
Unsafe.Write(_dataTable.outArrayPtr, result);
ValidateResult(_clsVar1, _clsVar2, _dataTable.outArrayPtr);
}
}
public void RunLclVarScenario_UnsafeRead()
{
TestLibrary.TestFramework.BeginScenario(nameof(RunLclVarScenario_UnsafeRead));
var op1 = Unsafe.Read<Vector64<Int32>>(_dataTable.inArray1Ptr);
var op2 = Unsafe.Read<Vector128<Int64>>(_dataTable.inArray2Ptr);
var result = AdvSimd.ShiftRightLogicalNarrowingUpper(op1, op2, 1);
Unsafe.Write(_dataTable.outArrayPtr, result);
ValidateResult(op1, op2, _dataTable.outArrayPtr);
}
public void RunLclVarScenario_Load()
{
TestLibrary.TestFramework.BeginScenario(nameof(RunLclVarScenario_Load));
var op1 = AdvSimd.LoadVector64((Int32*)(_dataTable.inArray1Ptr));
var op2 = AdvSimd.LoadVector128((Int64*)(_dataTable.inArray2Ptr));
var result = AdvSimd.ShiftRightLogicalNarrowingUpper(op1, op2, 1);
Unsafe.Write(_dataTable.outArrayPtr, result);
ValidateResult(op1, op2, _dataTable.outArrayPtr);
}
public void RunClassLclFldScenario()
{
TestLibrary.TestFramework.BeginScenario(nameof(RunClassLclFldScenario));
var test = new ImmBinaryOpTest__ShiftRightLogicalNarrowingUpper_Vector128_Int32_1();
var result = AdvSimd.ShiftRightLogicalNarrowingUpper(test._fld1, test._fld2, 1);
Unsafe.Write(_dataTable.outArrayPtr, result);
ValidateResult(test._fld1, test._fld2, _dataTable.outArrayPtr);
}
public void RunClassLclFldScenario_Load()
{
TestLibrary.TestFramework.BeginScenario(nameof(RunClassLclFldScenario_Load));
var test = new ImmBinaryOpTest__ShiftRightLogicalNarrowingUpper_Vector128_Int32_1();
fixed (Vector64<Int32>* pFld1 = &test._fld1)
fixed (Vector128<Int64>* pFld2 = &test._fld2)
{
var result = AdvSimd.ShiftRightLogicalNarrowingUpper(
AdvSimd.LoadVector64((Int32*)(pFld1)),
AdvSimd.LoadVector128((Int64*)(pFld2)),
1
);
Unsafe.Write(_dataTable.outArrayPtr, result);
ValidateResult(test._fld1, test._fld2, _dataTable.outArrayPtr);
}
}
public void RunClassFldScenario()
{
TestLibrary.TestFramework.BeginScenario(nameof(RunClassFldScenario));
var result = AdvSimd.ShiftRightLogicalNarrowingUpper(_fld1, _fld2, 1);
Unsafe.Write(_dataTable.outArrayPtr, result);
ValidateResult(_fld1, _fld2, _dataTable.outArrayPtr);
}
public void RunClassFldScenario_Load()
{
TestLibrary.TestFramework.BeginScenario(nameof(RunClassFldScenario_Load));
fixed (Vector64<Int32>* pFld1 = &_fld1)
fixed (Vector128<Int64>* pFld2 = &_fld2)
{
var result = AdvSimd.ShiftRightLogicalNarrowingUpper(
AdvSimd.LoadVector64((Int32*)(pFld1)),
AdvSimd.LoadVector128((Int64*)(pFld2)),
1
);
Unsafe.Write(_dataTable.outArrayPtr, result);
ValidateResult(_fld1, _fld2, _dataTable.outArrayPtr);
}
}
public void RunStructLclFldScenario()
{
TestLibrary.TestFramework.BeginScenario(nameof(RunStructLclFldScenario));
var test = TestStruct.Create();
var result = AdvSimd.ShiftRightLogicalNarrowingUpper(test._fld1, test._fld2, 1);
Unsafe.Write(_dataTable.outArrayPtr, result);
ValidateResult(test._fld1, test._fld2, _dataTable.outArrayPtr);
}
public void RunStructLclFldScenario_Load()
{
TestLibrary.TestFramework.BeginScenario(nameof(RunStructLclFldScenario_Load));
var test = TestStruct.Create();
var result = AdvSimd.ShiftRightLogicalNarrowingUpper(
AdvSimd.LoadVector64((Int32*)(&test._fld1)),
AdvSimd.LoadVector128((Int64*)(&test._fld2)),
1
);
Unsafe.Write(_dataTable.outArrayPtr, result);
ValidateResult(test._fld1, test._fld2, _dataTable.outArrayPtr);
}
public void RunStructFldScenario()
{
TestLibrary.TestFramework.BeginScenario(nameof(RunStructFldScenario));
var test = TestStruct.Create();
test.RunStructFldScenario(this);
}
public void RunStructFldScenario_Load()
{
TestLibrary.TestFramework.BeginScenario(nameof(RunStructFldScenario_Load));
var test = TestStruct.Create();
test.RunStructFldScenario_Load(this);
}
public void RunUnsupportedScenario()
{
TestLibrary.TestFramework.BeginScenario(nameof(RunUnsupportedScenario));
bool succeeded = false;
try
{
RunBasicScenario_UnsafeRead();
}
catch (PlatformNotSupportedException)
{
succeeded = true;
}
if (!succeeded)
{
Succeeded = false;
}
}
private void ValidateResult(Vector64<Int32> firstOp, Vector128<Int64> secondOp, void* result, [CallerMemberName] string method = "")
{
Int32[] inArray1 = new Int32[Op1ElementCount];
Int64[] inArray2 = new Int64[Op2ElementCount];
Int32[] outArray = new Int32[RetElementCount];
Unsafe.WriteUnaligned(ref Unsafe.As<Int32, byte>(ref inArray1[0]), firstOp);
Unsafe.WriteUnaligned(ref Unsafe.As<Int64, byte>(ref inArray2[0]), secondOp);
Unsafe.CopyBlockUnaligned(ref Unsafe.As<Int32, byte>(ref outArray[0]), ref Unsafe.AsRef<byte>(result), (uint)Unsafe.SizeOf<Vector128<Int32>>());
ValidateResult(inArray1, inArray2, outArray, method);
}
private void ValidateResult(void* firstOp, void* secondOp, void* result, [CallerMemberName] string method = "")
{
Int32[] inArray1 = new Int32[Op1ElementCount];
Int64[] inArray2 = new Int64[Op2ElementCount];
Int32[] outArray = new Int32[RetElementCount];
Unsafe.CopyBlockUnaligned(ref Unsafe.As<Int32, byte>(ref inArray1[0]), ref Unsafe.AsRef<byte>(firstOp), (uint)Unsafe.SizeOf<Vector64<Int32>>());
Unsafe.CopyBlockUnaligned(ref Unsafe.As<Int64, byte>(ref inArray2[0]), ref Unsafe.AsRef<byte>(secondOp), (uint)Unsafe.SizeOf<Vector128<Int64>>());
Unsafe.CopyBlockUnaligned(ref Unsafe.As<Int32, byte>(ref outArray[0]), ref Unsafe.AsRef<byte>(result), (uint)Unsafe.SizeOf<Vector128<Int32>>());
ValidateResult(inArray1, inArray2, outArray, method);
}
private void ValidateResult(Int32[] firstOp, Int64[] secondOp, Int32[] result, [CallerMemberName] string method = "")
{
bool succeeded = true;
for (var i = 0; i < RetElementCount; i++)
{
if (Helpers.ShiftRightLogicalNarrowingUpper(firstOp, secondOp, Imm, i) != result[i])
{
succeeded = false;
break;
}
}
if (!succeeded)
{
TestLibrary.TestFramework.LogInformation($"{nameof(AdvSimd)}.{nameof(AdvSimd.ShiftRightLogicalNarrowingUpper)}<Int32>(Vector64<Int32>, Vector128<Int64>, 1): {method} failed:");
TestLibrary.TestFramework.LogInformation($" firstOp: ({string.Join(", ", firstOp)})");
TestLibrary.TestFramework.LogInformation($" secondOp: ({string.Join(", ", secondOp)})");
TestLibrary.TestFramework.LogInformation($" result: ({string.Join(", ", result)})");
TestLibrary.TestFramework.LogInformation(string.Empty);
Succeeded = false;
}
}
}
}
| -1 |
dotnet/runtime | 66,248 | Fix issues related to JsonSerializerOptions mutation and caching. | Second attempt at merging https://github.com/dotnet/runtime/pull/65863. Original PR introduced test failures in netfx and was reverted in https://github.com/dotnet/runtime/pull/66235.
cc @jkotas | eiriktsarpalis | 2022-03-05T19:59:48Z | 2022-03-07T19:44:14Z | 44ec3c9474b3a93eb4f71791a43d8bb5c0b08a58 | 8b4eaf94140f488743d0c78caa3afec3b9c5d789 | Fix issues related to JsonSerializerOptions mutation and caching.. Second attempt at merging https://github.com/dotnet/runtime/pull/65863. Original PR introduced test failures in netfx and was reverted in https://github.com/dotnet/runtime/pull/66235.
cc @jkotas | ./src/libraries/System.Diagnostics.Process/src/System/Diagnostics/ProcessWaitHandle.Unix.cs | // Licensed to the .NET Foundation under one or more agreements.
// The .NET Foundation licenses this file to you under the MIT license.
using Microsoft.Win32.SafeHandles;
using System.Threading;
namespace System.Diagnostics
{
internal sealed class ProcessWaitHandle : WaitHandle
{
internal ProcessWaitHandle(ProcessWaitState processWaitState)
{
// Get the wait state's event, and use that event's safe wait handle
// in place of ours. This will let code register for completion notifications
// on this ProcessWaitHandle and be notified when the wait state's handle completes.
ManualResetEvent mre = processWaitState.EnsureExitedEvent();
this.SetSafeWaitHandle(mre.GetSafeWaitHandle());
}
protected override void Dispose(bool explicitDisposing)
{
// ProcessWaitState will dispose the handle
this.SafeWaitHandle = null;
base.Dispose(explicitDisposing);
}
}
}
| // Licensed to the .NET Foundation under one or more agreements.
// The .NET Foundation licenses this file to you under the MIT license.
using Microsoft.Win32.SafeHandles;
using System.Threading;
namespace System.Diagnostics
{
internal sealed class ProcessWaitHandle : WaitHandle
{
internal ProcessWaitHandle(ProcessWaitState processWaitState)
{
// Get the wait state's event, and use that event's safe wait handle
// in place of ours. This will let code register for completion notifications
// on this ProcessWaitHandle and be notified when the wait state's handle completes.
ManualResetEvent mre = processWaitState.EnsureExitedEvent();
this.SetSafeWaitHandle(mre.GetSafeWaitHandle());
}
protected override void Dispose(bool explicitDisposing)
{
// ProcessWaitState will dispose the handle
this.SafeWaitHandle = null;
base.Dispose(explicitDisposing);
}
}
}
| -1 |
dotnet/runtime | 66,248 | Fix issues related to JsonSerializerOptions mutation and caching. | Second attempt at merging https://github.com/dotnet/runtime/pull/65863. Original PR introduced test failures in netfx and was reverted in https://github.com/dotnet/runtime/pull/66235.
cc @jkotas | eiriktsarpalis | 2022-03-05T19:59:48Z | 2022-03-07T19:44:14Z | 44ec3c9474b3a93eb4f71791a43d8bb5c0b08a58 | 8b4eaf94140f488743d0c78caa3afec3b9c5d789 | Fix issues related to JsonSerializerOptions mutation and caching.. Second attempt at merging https://github.com/dotnet/runtime/pull/65863. Original PR introduced test failures in netfx and was reverted in https://github.com/dotnet/runtime/pull/66235.
cc @jkotas | ./src/tests/Interop/ICustomMarshaler/Primitives/CMakeLists.txt | project (CustomMarshalersPrimitives)
include_directories(${INC_PLATFORM_DIR})
set(SOURCES ICustomMarshalerNative.cpp )
# add the shared library
add_library (CustomMarshalersPrimitives SHARED ${SOURCES})
target_link_libraries(CustomMarshalersPrimitives ${LINK_LIBRARIES_ADDITIONAL})
# add the install targets
install (TARGETS CustomMarshalersPrimitives DESTINATION bin)
| project (CustomMarshalersPrimitives)
include_directories(${INC_PLATFORM_DIR})
set(SOURCES ICustomMarshalerNative.cpp )
# add the shared library
add_library (CustomMarshalersPrimitives SHARED ${SOURCES})
target_link_libraries(CustomMarshalersPrimitives ${LINK_LIBRARIES_ADDITIONAL})
# add the install targets
install (TARGETS CustomMarshalersPrimitives DESTINATION bin)
| -1 |
dotnet/runtime | 66,248 | Fix issues related to JsonSerializerOptions mutation and caching. | Second attempt at merging https://github.com/dotnet/runtime/pull/65863. Original PR introduced test failures in netfx and was reverted in https://github.com/dotnet/runtime/pull/66235.
cc @jkotas | eiriktsarpalis | 2022-03-05T19:59:48Z | 2022-03-07T19:44:14Z | 44ec3c9474b3a93eb4f71791a43d8bb5c0b08a58 | 8b4eaf94140f488743d0c78caa3afec3b9c5d789 | Fix issues related to JsonSerializerOptions mutation and caching.. Second attempt at merging https://github.com/dotnet/runtime/pull/65863. Original PR introduced test failures in netfx and was reverted in https://github.com/dotnet/runtime/pull/66235.
cc @jkotas | ./src/libraries/System.Threading.Tasks.Parallel/tests/ParallelStateTest.cs | // Licensed to the .NET Foundation under one or more agreements.
// The .NET Foundation licenses this file to you under the MIT license.
//
// =+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+
//
// This file contains functional tests for ParallelLoopState
//
// =-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=--=-=-=-=-=-=-=-=-=-=-=-
using System.Collections.Generic;
using System.Diagnostics;
using System.Reflection;
using Xunit;
namespace System.Threading.Tasks.Test
{
public sealed class ParallelStateTest
{
#region Private Fields
private readonly object _lock = new object();
private readonly IEnumerable<int> _collection = null; // the collection used in Foreach
private readonly Barrier _barrier;
// Holds list of available actions
private readonly Dictionary<string, Action<long, ParallelLoopState>> _availableActions = new Dictionary<string, Action<long, ParallelLoopState>>();
private readonly Dictionary<string, Action<ParallelLoopResult?>> _availableVerifications = new Dictionary<string, Action<ParallelLoopResult?>>();
private readonly TestParameters _parameters;
private readonly ManualResetEventSlim _mreSlim;
private readonly double[] _results; // global place to store the workload result for verification
// data structure used with ParallelLoopState<TLocal>
// each row is the sequence of loop "index" finished in the same thread
// private Dictionary<int, List<int>> sequences;
private long _threadCount;
private readonly List<int>[] _sequences;
private readonly List<long>[] _sequences64;
private long _startIndex = 0; // start index for the loop
// Hold list of actions to be performed
private List<Action<long, ParallelLoopState>> _actions = new List<Action<long, ParallelLoopState>>();
// Hold list of verification
private Queue<Action<ParallelLoopResult?>> _verifications = new Queue<Action<ParallelLoopResult?>>();
private volatile bool _isStopped = false; // Flag to indicate that we called Stop() on the Parallel state
private long? _lowestBreakIter = null; // LowestBreakIteration value holder, null indicates that Break hasn't been called
private volatile bool _isExceptional = false; // Flag to indicate exception thrown in the test
        private int _iterCount = 0; // the test's own counter for certain scenarios, so the test can change behaviour after a certain number of loop iterations
#endregion
#region Constructor
public ParallelStateTest(TestParameters parameters)
{
_parameters = parameters;
_mreSlim = new ManualResetEventSlim(false);
_results = new double[parameters.Count];
_sequences = new List<int>[1024];
_sequences64 = new List<long>[1024];
_threadCount = 0;
// Set available actions
_availableActions["Stop"] = StopAction;
_availableActions["Break"] = BreakAction;
_availableActions["Exceptional"] = ExceptionalAction;
_availableActions["MultipleStop"] = MultipleStopAction;
_availableActions["MultipleBreak"] = MultipleBreakAction;
_availableActions["MultipleException"] = MultipleExceptionAction;
_availableActions["SyncWaitStop"] = SyncWaitStop;
_availableActions["SyncSetStop"] = SyncSetStop;
_availableActions["SyncWaitBreak"] = SyncWaitBreak;
_availableActions["SyncSetBreak"] = SyncSetBreak;
_availableActions["SyncWaitStopCatchExp"] = SyncWaitStopCatchExp;
_availableActions["SyncWaitBreakCatchExp"] = SyncWaitBreakCatchExp;
_availableActions["SyncWaitExceptional"] = SyncWaitExceptional;
_availableActions["SyncSetExceptional"] = SyncSetExceptional;
// Set available verifications
_availableVerifications["StopVerification"] = StopVerification;
_availableVerifications["BreakVerification"] = BreakVerification;
_availableVerifications["ExceptionalVerification"] = ExceptionalVerification;
_barrier = new Barrier(parameters.Count);
// A barrier is used in the workload to ensure that all tasks are running before any proceed.
// This causes delays if the count is higher than the number of processors, as the thread pool
// will need to (slowly) inject additional threads to meet the demand. As a less-than-ideal
// workaround, we change the thread pool's min thread count to be at least the number required
// for the test. Not perfect, but better than nothing.
ThreadPoolHelpers.EnsureMinThreadsAtLeast(parameters.Count);
int length = parameters.Count;
if (length < 0)
length = 0;
if (parameters.Api != API.For)
{
int[] collArray = new int[length];
for (int j = 0; j < length; j++)
collArray[j] = ((int)_startIndex) + j;
if (parameters.Api == API.ForeachOnArray)
_collection = collArray;
else if (parameters.Api == API.ForeachOnList)
_collection = new List<int>(collArray);
else
_collection = collArray;
}
int index = 0;
for (index = 0; index < parameters.Count; index++)
_actions.Add(DummyAction);
index = 0;
foreach (string action in parameters.Actions)
{
Action<long, ParallelLoopState> a = null;
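                // Actions can be specified as "Name" or "Name_Index"; an explicit index assigns the action to that iteration, otherwise actions are assigned sequentially.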
string[] actionIndexPair = action.Split('_');
if (!_availableActions.TryGetValue(actionIndexPair[0], out a))
throw new ArgumentException(actionIndexPair[0] + " is not a valid action");
_actions[actionIndexPair.Length > 1 ? int.Parse(actionIndexPair[1]) : index++] = a;
}
foreach (string verification in parameters.Verifications)
{
Action<ParallelLoopResult?> act = null;
if (!_availableVerifications.TryGetValue(verification, out act))
throw new ArgumentException(verification + " is not a valid verification");
_verifications.Enqueue(act);
}
}
#endregion
internal void RealRun()
{
ParallelLoopResult? loopResult = null;
try
{
if (!_parameters.Is64)
{
if (_parameters.Api == API.For)
{
if (_parameters.WithLocalState)
{
// call Parallel.For with step and ParallelLoopState<TLocal>, plus threadLocalFinally
loopResult = Parallel.For<List<int>>((int)_startIndex, (int)_startIndex + _parameters.Count, ThreadLocalInit, WorkWithLocalState, ThreadLocalFinally);
}
else
{
loopResult = Parallel.For((int)_startIndex, (int)_startIndex + _parameters.Count, WorkWithNoLocalState);
}
}
else
{
if (_parameters.WithLocalState)
{
// call Parallel.Foreach and ParallelLoopState<TLocal>, plus threadLocalFinally
loopResult = Parallel.ForEach<int, List<int>>(_collection, ThreadLocalInit, WorkWithLocalState, ThreadLocalFinally);
}
else
{
loopResult = Parallel.ForEach<int>(_collection, WorkWithNoLocalState);
}
}
}
else
{
_startIndex = int.MaxValue;
if (_parameters.Api == API.For)
{
if (_parameters.WithLocalState)
{
// call Parallel.For with step and ParallelLoopState<TLocal>, plus threadLocalFinally
loopResult = Parallel.For<List<long>>(_startIndex, _startIndex + _parameters.Count, ThreadLocalInit64, WorkWithLocalState, ThreadLocalFinally64);
}
else
{
loopResult = Parallel.For(_startIndex, _startIndex + _parameters.Count, WorkWithNoLocalState);
}
}
}
Assert.False(_parameters.ExpectingException, "SystemInvalidOperation Exception was not thrown when expecting one");
}
catch (AggregateException exp)
{
if (_parameters.ExpectingException)
Assert.IsType<InvalidOperationException>(exp.Flatten().InnerException);
}
// If the config file specified what verifications to use for this test, verify result
while (_verifications.Count > 0)
{
_verifications.Dequeue().Invoke(loopResult);
}
}
#region Workloads
// Workload for Parallel.For / Foreach
private void Work(long i)
{
//
// Make sure all task are spawned, before moving on
//
_barrier.SignalAndWait();
if (_results[i - _startIndex] == 0)
_results[i - _startIndex] = ZetaSequence((int)(i - _startIndex) + 1000);
else
_results[i - _startIndex] = double.MinValue; //same index should not be processed twice
}
// Workload for Parallel.For / Foreach with parallelloopstate but no thread local state
private void WorkWithNoLocalState(int i, ParallelLoopState state)
{
Debug.WriteLine("WorkWithNoLocalState(int) on index {0}, StartIndex: {1}, real index {2}", i, _startIndex, i - _startIndex);
Work(i);
_actions[i].Invoke(i, state);
}
// Workload for Parallel.For / Foreach with parallel loopstate and thread local state
private List<int> WorkWithLocalState(int i, ParallelLoopState state, List<int> threadLocalValue)
{
Debug.WriteLine("WorkWithLocalState(int) on index {0}, StartIndex: {1}, real index {2}", i, _startIndex, i - _startIndex);
Work(i);
threadLocalValue.Add(i + (int)_startIndex);
_actions[i].Invoke(i, state);
return threadLocalValue;
}
// Workload for Parallel.For / Foreach with index, parallel loop state and thread local state
private List<int> WorkWithLocalState(int i, int index, ParallelLoopState state, List<int> threadLocalValue)
{
Debug.WriteLine("WorkWithLocalState(int, index) on index {0}, StartIndex: {1}, real index {2}", i, _startIndex, i - _startIndex);
Work(i);
threadLocalValue.Add(index + (int)_startIndex);
_actions[index].Invoke(index, state);
return threadLocalValue;
}
// Workload for Parallel.For with long range
private void WorkWithNoLocalState(long i, ParallelLoopState state)
{
Debug.WriteLine("WorkWithNoLocalState(long) on index {0}, StartIndex: {1}, real index {2}", i, _startIndex, i - _startIndex);
Work(i);
_actions[(int)(i - _startIndex)].Invoke(i, state);
}
// Workload for Parallel.For with long range
private List<long> WorkWithLocalState(long i, ParallelLoopState state, List<long> threadLocalValue)
{
Debug.WriteLine("WorkWithLocalState(long) on index {0}, StartIndex: {1}, real index {2}", i, _startIndex, i - _startIndex);
Work(i);
threadLocalValue.Add(i + _startIndex);
_actions[(int)(i - _startIndex)].Invoke(i, state);
return threadLocalValue;
}
/// <summary>
/// This action waits for the other iteration to call Stop and
        /// set the MRE when it's done. Once the MRE is set, this function
/// calls Break which results in an InvalidOperationException
/// </summary>
/// <param name="i"></param>
/// <param name="state"></param>
private void SyncWaitBreakCatchExp(long i, ParallelLoopState state)
{
_mreSlim.Wait();
BreakActionHelper(i, state, true);
}
/// <summary>
/// This action waits for the other iteration to call Break and
        /// set the MRE when it's done. Once the MRE is set, this function
/// calls Stop which results in an InvalidOperationException
/// </summary>
/// <param name="i"></param>
/// <param name="state"></param>
private void SyncWaitStopCatchExp(long i, ParallelLoopState state)
{
_mreSlim.Wait();
StopActionHelper(i, state, true);
}
/// <summary>
/// This action waits for the other iteration to call Break and
        /// set the MRE when it's done. Once the MRE is set, this function
/// calls Break which results in the lower iteration winning
/// </summary>
/// <param name="i"></param>
/// <param name="state"></param>
private void SyncWaitBreak(long i, ParallelLoopState state)
{
//Logger.LogInformation("Calling SyncWaitBreakAction on index {0}, StartIndex: {1}, real index {2}", i, StartIndex, i - StartIndex);
_mreSlim.Wait();
BreakAction(i, state);
}
/// <summary>
/// This action calls Break and notifies other iterations
/// by setting the shared MRE
/// </summary>
/// <param name="i"></param>
/// <param name="state"></param>
private void SyncSetBreak(long i, ParallelLoopState state)
{
//Logger.LogInformation("Calling SyncSetBreakAction on index {0}, StartIndex: {1}, real index {2}", i, StartIndex, i - StartIndex);
// Do some sleep to reduce race condition with next action
Task delay = Task.Delay(10);
delay.Wait();
BreakAction(i, state);
_mreSlim.Set();
}
/// <summary>
/// This function waits for another iteration to call Stop
/// and then set the shared MRE to notify when it is done
/// </summary>
/// <param name="i"></param>
/// <param name="state"></param>
private void SyncWaitStop(long i, ParallelLoopState state)
{
//Logger.LogInformation("Calling SyncWaitStopAction on index {0}, StartIndex: {1}, real index {2}", i, StartIndex, i - StartIndex);
_mreSlim.Wait();
StopAction(i, state);
}
/// <summary>
/// This action calls Stop and notifies the other iteration by setting the MRE
/// </summary>
/// <param name="i"></param>
/// <param name="state"></param>
private void SyncSetStop(long i, ParallelLoopState state)
{
//Logger.LogInformation("Calling SyncSetStopAction on index {0}, StartIndex: {1}, real index {2}", i, StartIndex, i - StartIndex);
// Do some sleep to reduce race condition with next action
Task delay = Task.Delay(10);
delay.Wait();
StopAction(i, state);
_mreSlim.Set();
}
/// <summary>
/// This action waits for another iteration to throw an exception and notify
/// when it is done by setting the MRE
/// </summary>
/// <param name="i"></param>
/// <param name="state"></param>
private void SyncWaitExceptional(long i, ParallelLoopState state)
{
_mreSlim.Wait();
ExceptionalAction(i, state);
}
/// <summary>
/// This action throws an exception and notifies the rest of the iterations
/// by setting a shared MRE
/// </summary>
/// <param name="i"></param>
/// <param name="state"></param>
private void SyncSetExceptional(long i, ParallelLoopState state)
{
// Do some sleep to reduce race condition with next action
Task delay = Task.Delay(10);
delay.Wait();
ExceptionalAction(i, state);
_mreSlim.Set();
}
/// <summary>
/// This action is a NOP - does nothing
/// </summary>
/// <param name="i"></param>
/// <param name="state"></param>
private void DummyAction(long i, ParallelLoopState state)
{
}
/// <summary>
        /// This action calls Stop on the current iteration. Note that this is called by only one iteration in the loop
/// </summary>
/// <param name="i"></param>
/// <param name="state"></param>
private void StopAction(long i, ParallelLoopState state)
{
StopActionHelper(i, state, false);
}
/// <summary>
/// Calls Break for the current Iteration. Note that this is called by only one iteration in the loop
/// </summary>
/// <param name="i"></param>
/// <param name="state"></param>
private void BreakAction(long i, ParallelLoopState state)
{
BreakActionHelper(i, state, false);
}
/// <summary>
        /// Note!! This function is not thread-safe and care must be taken so that it is not called concurrently
///
        /// Helper function that calls Stop for the current iteration and sets the test flag (_isStopped) to true
///
/// 1) If stop was already called, check if ParallelLoopState-->IsStopped is true
/// 2) If stop was already called, check if ParallelLoopState-->ShouldExitCurrentIteration is true
/// </summary>
/// <param name="i"></param>
/// <param name="state"></param>
/// <param name="catchException"></param>
private void StopActionHelper(long i, ParallelLoopState state, bool catchException)
{
Debug.WriteLine("Calling StopAction on index: {0}, StartIndex: {1}, real index {2}", i, _startIndex, i - _startIndex);
// We already called Stop() on the Parallel state
Assert.False(_isStopped && _isStopped != state.IsStopped, string.Format("Expecting IsStopped to be true for iteration {0}", i));
// If we previously called Stop() on the parallel state,
// we expect all iterations see the state's ShouldExitCurrentIteration to be true
Assert.False(_isStopped && !state.ShouldExitCurrentIteration, string.Format("Expecting ShouldExitCurrentIteration to be true for iteration {0}", i));
try
{
state.Stop();
_isStopped = true;
// If Stop is called after a Break was called then an InvalidOperationException is expected
Assert.False(catchException, "Not getting InvalidOperationException from Stop() when expecting one");
}
// If Stop is called after a Break was called then an InvalidOperationException is expected
catch (InvalidOperationException) when (catchException)
{
}
}
/// <summary>
/// Thread safe version of Stop Action. This can safely be invoked concurrently
///
/// Stops the loop for the first parameters.Count/2 iterations and sets the test flag (_iterCount) to indicate this
///
/// 1) If Stop was previously called, then check that ParallelLoopState-->IsStopped is set to true
/// 2) If Stop was previously called, then check that ParallelLoopState-->ShouldExitCurrentIteration is true
/// </summary>
/// <param name="i"></param>
/// <param name="state"></param>
/// <param name="catchException"></param>
private void MultipleStopAction(long i, ParallelLoopState state)
{
if (Interlocked.Increment(ref _iterCount) < _parameters.Count / 2)
{
state.Stop();
_isStopped = true;
}
else
{
// We already called Stop() on the Parallel state
Assert.False(_isStopped && !state.IsStopped, string.Format("Expecting IsStopped to be true for iteration {0}", i));
// If we previously called Stop() on the parallel state,
// we expect all iterations see the state's ShouldExitCurrentIteration to be true
Assert.False(_isStopped && !state.ShouldExitCurrentIteration, string.Format("Expecting ShouldExitCurrentIteration to be true for iteration {0}", i));
}
}
/// <summary>
/// NOTE!! This function is not thread safe and cannot be called concurrently
///
/// Helper function that calls Break for the current iteration if
/// 1) Break has never been called so far
/// 2) if the current iteration is smaller than the iteration for which Break was previously called
///
/// If Break was already called then check that
/// 1) The lowest break iteration stored by us is the same as the one passed in State
/// 2) If this iteration is greater than the lowest break iteration, then shouldExitCurrentIteration should be true
/// 3) if this iteration is lower than the lowest break iteration then shouldExitCurrentIteration should be false
/// </summary>
/// <param name="i">current iteration </param>
/// <param name="state">the parallel loop state</param>
/// <param name="catchException">whether calling Break will throw an InvalidOperationException</param>
private void BreakActionHelper(long i, ParallelLoopState state, bool catchException)
{
Debug.WriteLine("Calling BreakAction on index {0}, StartIndex: {1}, real index {2}", i, _startIndex, i - _startIndex);
// If we previously called Break() on the parallel state,
// we expect all iterations to have the same LowestBreakIteration value
if (_lowestBreakIter.HasValue)
{
Assert.False(state.LowestBreakIteration.Value != _lowestBreakIter.Value,
string.Format("Expecting LowestBreakIteration value to be {0} for iteration {1}, while getting {2}", _lowestBreakIter, i, state.LowestBreakIteration.Value));
// If we previously called Break() on the parallel state,
// we expect all higher iterations see the state's ShouldExitCurrentIteration to be true
Assert.False(i > _lowestBreakIter.Value && !state.ShouldExitCurrentIteration,
string.Format("Expecting ShouldExitCurrentIteration to be true for iteration {0}, LowestBreakIteration is {1}", i, _lowestBreakIter));
}
if (_lowestBreakIter.HasValue && i < _lowestBreakIter.Value && state.ShouldExitCurrentIteration)
{
long lbi = _lowestBreakIter.Value;
// If we previously called Break() on the parallel state,
// we expect all lower iterations see the state's ShouldExitCurrentIteration to be false.
// There is however a race condition during the check here, another Break could've happen
// in between retrieving LowestBreakIteration value and ShouldExitCurrentIteration
// which changes the value of ShouldExitCurrentIteration.
// We do another sample instead of LowestBreakIteration before failing the test
Assert.False(i < lbi, string.Format("Expecting ShouldExitCurrentIteration to be false for iteration {0}, LowestBreakIteration is {1}", i, lbi));
}
if (!_lowestBreakIter.HasValue || (_lowestBreakIter.HasValue && i < _lowestBreakIter.Value))
{
// If calls Break for the first time or if current iteration less than LowestBreakIteration,
// call Break() again, and make sure LowestBreakIteration value gets updated
try
{
state.Break();
_lowestBreakIter = state.LowestBreakIteration; // Save the lowest break iteration
// If the test is checking the scenario where break is called after stop then
// we expect an InvalidOperationException
Assert.False(catchException, "Not getting InvalidOperationException from Break() when expecting one");
}
// If the test is checking the scenario where break is called after stop then
// we expect an InvalidOperationException
catch (InvalidOperationException) when (catchException)
{
}
}
}
/// <summary>
/// This action tests multiple Break calls from different loop iterations
///
/// Helper function that calls Break for the first parameters.Count/2 iterations
///
/// If Break was already called then check that
/// 1) If this iteration is greater than the lowest break iteration, then shouldExitCurrentIteration should be true
/// 2) if this iteration is lower than the lowest break iteration then shouldExitCurrentIteration should be false
/// </summary>
/// <param name="i"></param>
/// <param name="state"></param>
/// <param name="catchException"></param>
private void MultipleBreakAction(long i, ParallelLoopState state)
{
if (Interlocked.Increment(ref _iterCount) < _parameters.Count / 2)
{
state.Break();
lock (_lock)
{
// Save the lowest break iteration
//m_lowestBreakIter = !m_lowestBreakIter.HasValue ? i : Math.Min(m_lowestBreakIter.Value, i);
if (!_lowestBreakIter.HasValue)
_lowestBreakIter = i;
if (_lowestBreakIter.Value > i)
_lowestBreakIter = i;
}
}
else
{
// If we previously called Break() on the parallel state,
// we expect all higher iterations see the state's ShouldExitCurrentIteration to be true
if (state.LowestBreakIteration.HasValue)
{
Assert.False(i > state.LowestBreakIteration.Value && !state.ShouldExitCurrentIteration,
string.Format("Expecting ShouldExitCurrentIteration to be true for iteration {0}, LowestBreakIteration is {1}", i, state.LowestBreakIteration.Value));
}
if (state.LowestBreakIteration.HasValue && i < state.LowestBreakIteration.Value && state.ShouldExitCurrentIteration)
{
long lbi = state.LowestBreakIteration.Value;
// If we previously called Break() on the parallel state,
// we expect all lower iterations see the state's ShouldExitCurrentIteration to be false.
// There is however a race condition during the check here, another Break could've happen
// in between retrieving LowestBreakIteration value and ShouldExitCurrentIteration
// which changes the value of ShouldExitCurrentIteration.
// We do another sample instead of LowestBreakIteration before failing the test
Assert.False(i < lbi, string.Format("Expecting ShouldExitCurrentIteration to be false for iteration {0}, LowestBreakIteration is {1}", i, lbi));
}
}
}
/// <summary>
/// Note!! This function is not thread safe and care must be taken so it is not called concurrently
///
/// This helper throws an exception from the current iteration if an exception is not already thrown
///
/// 1) If an exception was previously thrown (_isExceptional = true), then it checks if
/// ParallelLoopState-->IsExceptional is set
/// 2) If an exception was previously thrown then it checks if ParallelLoopState-->ShouldExitCurrentIteration is true
///
/// If an exception was not thrown before this, then throw an exception and set the test flag _isExceptional to true
/// </summary>
/// <param name="i"></param>
/// <param name="state"></param>
private void ExceptionalAction(long i, ParallelLoopState state)
{
Debug.WriteLine("Calling ExceptionalAction on index {0}, StartIndex: {1}, real index {2}", i, _startIndex, i - _startIndex);
Assert.False(_isExceptional != state.IsExceptional, string.Format("IsExceptional is expected to be {0} while getting {1}", _isExceptional, state.IsExceptional));
// A previous iteration threw an exception, so the Parallel loop should stop its work
Assert.False(_isExceptional && !state.ShouldExitCurrentIteration, string.Format("Expecting ShouldExitCurrentIteration to be true, since Exception was thrown on previous iterations"));
try
{
throw new InvalidOperationException("Throws test exception to verify it got handled properly");
}
finally
{
_isExceptional = true;
}
}
/// <summary>
/// This is the thread safe version of an action that throws exceptions and can be called concurrently
///
/// This action throws an exception for the first parameters.Count/2 iterations
///
/// For the rest of the actions, it performs the following checks
/// 1) If an exception was already thrown then check that ParallelLoopState-->IsExceptional is true
/// 2) If an exception was already thrown then check that ParallelLoopState->ShouldExitCurrentIteration is true
/// </summary>
/// <param name="i"></param>
/// <param name="state"></param>
private void MultipleExceptionAction(long i, ParallelLoopState state)
{
Debug.WriteLine("Calling ExceptionalAction2 on index {0}, StartIndex: {1}, real index {2}", i, _startIndex, i - _startIndex);
if (Interlocked.Increment(ref _iterCount) < _parameters.Count / 2)
{
try
{
throw new System.InvalidOperationException("Throws test exception to verify it got handled properly");
}
finally
{
_isExceptional = true;
}
}
else
{
Assert.False(state.IsExceptional && !_isExceptional, string.Format("IsExceptional is expected to be {0} while getting {1}", _isExceptional, state.IsExceptional));
// A previous iteration threw an exception, so the Parallel loop should stop its work
Assert.False(state.IsExceptional && !state.ShouldExitCurrentIteration,
string.Format("Expecting ShouldExitCurrentIteration to be true, since Exception was thrown on previous iterations"));
}
}
#endregion
#region Helper Methods
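/// <summary>
/// Computes a partial sum of the series 1/(i*i) (the Basel series), which converges toward pi^2/6 (~1.6449)
/// as n grows. This is presumably the workload each loop body runs, which is why Verify() below accepts
/// results in the 1.63 - 1.65 window.
/// </summary>
/// <param name="n">number of terms to sum</param>
/// <returns>the partial sum</returns>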
public static double ZetaSequence(int n)
{
double result = 0;
for (int i = 1; i < n; i++)
{
result += 1.0 / ((double)i * (double)i);
}
return result;
}
/// <summary>
/// Called when a Thread is being used for the first time in the Parallel loop
/// Used by the 64 bit versions of Parallel.For
/// </summary>
/// <returns>a list where each loop body will store a unique result for verification</returns>
private List<long> ThreadLocalInit64()
{
List<long> local = new List<long>();
return local;
}
/// <summary>
/// Called when a Thread has completed execution in the Parallel loop
/// Used by the 64 bit versions of Parallel.For
/// </summary>
/// <returns>Stores the ThreadLocal list of results to a global container for verification</returns>
private void ThreadLocalFinally64(List<long> local)
{
//add this row to the global sequences
//sequences.Add(Environment.CurrentManagedThreadId, local);
long index = Interlocked.Increment(ref _threadCount) - 1;
_sequences64[index] = local;
}
/// <summary>
/// Called when a Thread is being used for the first time in the Parallel loop
/// </summary>
/// <returns>a list where each loop body will store a unique result for verification</returns>
private List<int> ThreadLocalInit()
{
List<int> local = new List<int>();
return local;
}
/// <summary>
/// Called when a Thread has completed execution in the Parallel loop
/// </summary>
/// <returns>Stores the ThreadLocal list of results to a global container for verification</returns>
private void ThreadLocalFinally(List<int> local)
{
//add this row to the global sequences
//sequences.Add(Environment.CurrentManagedThreadId, local);
long index = Interlocked.Increment(ref _threadCount) - 1;
_sequences[(int)index] = local;
}
/// <summary>
/// Checks that the result returned by the body of iteration i is correct
/// </summary>
/// <param name="i"></param>
/// <returns></returns>
private void Verify(int i)
{
//Floating point comparison can't be done by rounding off to the nearest decimal points since
//1.64 could be represented as 1.63999999 or as 1.6499999999. To perform floating point comparisons,
//a range has to be defined and checked to ensure that the result obtained is within the specified range
double minLimit = 1.63;
double maxLimit = 1.65;
if (_results[i] < minLimit || _results[i] > maxLimit)
{
Assert.False(double.MinValue == _results[i], string.Format("results[{0}] has been revisited", i));
if (_isStopped && 0 == _results[i])
Debug.WriteLine("Stopped calculation at index = {0}", i);
Assert.True(_isStopped && 0 == _results[i],
string.Format("Incorrect results[{0}]. Expected to lie between {1} and {2}, but got {3})", i, minLimit, maxLimit, _results[i]));
}
}
/// <summary>
/// Used to verify the result of a loop that was 'Stopped'
///
/// Expected:
/// 1) A ParallelLoopResult with IsCompleted = false and LowestBreakIteration = null
/// 2) For results that were processed, the body stored the correct value
/// </summary>
/// <param name="loopResult"></param>
/// <returns></returns>
private void StopVerification(ParallelLoopResult? loopResult)
{
Assert.False(loopResult == null, "No ParallelLoopResult returned");
Assert.False(loopResult.Value.IsCompleted == true || loopResult.Value.LowestBreakIteration != null,
string.Format("ParallelLoopResult invalid, expecting Completed=false,LowestBreakIteration=null, actual: {0}, {1}", loopResult.Value.IsCompleted, loopResult.Value.LowestBreakIteration));
for (int i = 0; i < _parameters.Count; i++)
Verify(i);
}
/// <summary>
/// This verification is used when we successfully called 'Break' on the loop
///
/// Expected:
/// 1) A valid ParallelLoopResult was returned with IsCompleted = false & LowestBreakIteration = lowest iteration on which
/// the test called Break
/// 2) For results that were processed, the body stored the correct value
/// </summary>
/// <param name="loopResult"></param>
/// <returns></returns>
private void BreakVerification(ParallelLoopResult? loopResult)
{
Assert.False(loopResult == null, "No ParallelLoopResult returned");
Assert.False(loopResult.Value.IsCompleted == true || loopResult.Value.LowestBreakIteration == null || loopResult.Value.LowestBreakIteration != _lowestBreakIter,
string.Format("ParallelLoopResult invalid, expecting Completed=false,LowestBreakIteration={0}, actual: {1}, {2}", _lowestBreakIter, loopResult.Value.IsCompleted, loopResult.Value.LowestBreakIteration));
for (int i = 0; i < _lowestBreakIter.Value - _startIndex; i++)
Verify(i);
}
/// <summary>
/// This verification is called when we expect an exception from the test
///
/// Expected: ParallelLoopResult is returned as null
/// </summary>
/// <param name="loopResult"></param>
/// <returns></returns>
private void ExceptionalVerification(ParallelLoopResult? loopResult)
{
Assert.Null(loopResult);
}
#endregion
}
public enum API
{
For,
ForeachOnArray,
ForeachOnList,
}
public class TestParameters
{
public TestParameters()
{
Api = API.For;
}
public API Api; // the API to be tested
public int Count; // the count of the loop range
public IEnumerable<string> Actions; // names of the actions performed by the loop iterations
public IEnumerable<string> Verifications; // names of the verifications to run on the ParallelLoopResult
public bool ExpectingException; // an exception is expected from the loop
public bool WithLocalState; // use the overloads that take thread-local state
public bool Is64; // use the 64-bit (long-indexed) overloads
}
public sealed class ParallelState
{
[Fact]
[OuterLoop]
public static void ParallelState0()
{
string[] actions = new string[] { "Break", };
string[] verifications = new string[] { "BreakVerification", };
TestParameters parameters = new TestParameters
{
Api = API.For,
Count = 10,
Actions = actions,
Verifications = verifications,
ExpectingException = false,
WithLocalState = false,
Is64 = false,
};
ParallelStateTest test = new ParallelStateTest(parameters);
test.RealRun();
}
[Fact]
[OuterLoop]
public static void ParallelState1()
{
string[] actions = new string[] { "Break", };
string[] verifications = new string[] { "BreakVerification", };
TestParameters parameters = new TestParameters
{
Api = API.For,
Count = 10,
Actions = actions,
Verifications = verifications,
ExpectingException = false,
WithLocalState = false,
Is64 = true,
};
ParallelStateTest test = new ParallelStateTest(parameters);
test.RealRun();
}
[Fact]
[OuterLoop]
public static void ParallelState2()
{
string[] actions = new string[] { "Break", };
string[] verifications = new string[] { "BreakVerification", };
TestParameters parameters = new TestParameters
{
Api = API.For,
Count = 10,
Actions = actions,
Verifications = verifications,
ExpectingException = false,
WithLocalState = true,
Is64 = true,
};
ParallelStateTest test = new ParallelStateTest(parameters);
test.RealRun();
}
[Fact]
[OuterLoop]
public static void ParallelState3()
{
string[] actions = new string[] { "Exceptional", };
string[] verifications = new string[] { "ExceptionalVerification", };
TestParameters parameters = new TestParameters
{
Api = API.For,
Count = 10,
Actions = actions,
Verifications = verifications,
ExpectingException = true,
WithLocalState = false,
Is64 = false,
};
ParallelStateTest test = new ParallelStateTest(parameters);
test.RealRun();
}
[Fact]
[OuterLoop]
public static void ParallelState4()
{
string[] actions = new string[] { "Exceptional", };
string[] verifications = new string[] { "ExceptionalVerification", };
TestParameters parameters = new TestParameters
{
Api = API.For,
Count = 10,
Actions = actions,
Verifications = verifications,
ExpectingException = true,
WithLocalState = false,
Is64 = true,
};
ParallelStateTest test = new ParallelStateTest(parameters);
test.RealRun();
}
[Fact]
[OuterLoop]
public static void ParallelState5()
{
string[] actions = new string[] { "Exceptional", };
string[] verifications = new string[] { "ExceptionalVerification", };
TestParameters parameters = new TestParameters
{
Api = API.For,
Count = 10,
Actions = actions,
Verifications = verifications,
ExpectingException = true,
WithLocalState = true,
Is64 = true,
};
ParallelStateTest test = new ParallelStateTest(parameters);
test.RealRun();
}
[Fact]
[OuterLoop]
public static void ParallelState6()
{
string[] actions = new string[] { "MultipleBreak", "MultipleBreak", "MultipleBreak", "MultipleBreak", "MultipleBreak", "MultipleBreak", "MultipleBreak", "MultipleBreak", "MultipleBreak", "MultipleBreak", };
string[] verifications = new string[] { "BreakVerification", };
TestParameters parameters = new TestParameters
{
Api = API.For,
Count = 10,
Actions = actions,
Verifications = verifications,
ExpectingException = false,
WithLocalState = false,
Is64 = true,
};
ParallelStateTest test = new ParallelStateTest(parameters);
test.RealRun();
}
[Fact]
[OuterLoop]
public static void ParallelState7()
{
string[] actions = new string[] { "MultipleBreak", "MultipleBreak", "MultipleBreak", "MultipleBreak", "MultipleBreak", "MultipleBreak", "MultipleBreak", "MultipleBreak", "MultipleBreak", "MultipleBreak", };
string[] verifications = new string[] { "BreakVerification", };
TestParameters parameters = new TestParameters
{
Api = API.For,
Count = 10,
Actions = actions,
Verifications = verifications,
ExpectingException = false,
WithLocalState = true,
Is64 = false,
};
ParallelStateTest test = new ParallelStateTest(parameters);
test.RealRun();
}
[Fact]
[OuterLoop]
public static void ParallelState8()
{
string[] actions = new string[] { "MultipleBreak", "MultipleBreak", "MultipleBreak", "MultipleBreak", "MultipleBreak", "MultipleBreak", "MultipleBreak", "MultipleBreak", "MultipleBreak", "MultipleBreak", };
string[] verifications = new string[] { "BreakVerification", };
TestParameters parameters = new TestParameters
{
Api = API.For,
Count = 10,
Actions = actions,
Verifications = verifications,
ExpectingException = false,
WithLocalState = true,
Is64 = true,
};
ParallelStateTest test = new ParallelStateTest(parameters);
test.RealRun();
}
[Fact]
[OuterLoop]
public static void ParallelState9()
{
string[] actions = new string[] { "MultipleException", "MultipleException", "MultipleException", "MultipleException", "MultipleException", "MultipleException", "MultipleException", "MultipleException", "MultipleException", "MultipleException", };
string[] verifications = new string[] { "ExceptionalVerification", };
TestParameters parameters = new TestParameters
{
Api = API.For,
Count = 10,
Actions = actions,
Verifications = verifications,
ExpectingException = true,
WithLocalState = false,
Is64 = false,
};
ParallelStateTest test = new ParallelStateTest(parameters);
test.RealRun();
}
[Fact]
[OuterLoop]
public static void ParallelState10()
{
string[] actions = new string[] { "MultipleException", "MultipleException", "MultipleException", "MultipleException", "MultipleException", "MultipleException", "MultipleException", "MultipleException", "MultipleException", "MultipleException", };
string[] verifications = new string[] { "ExceptionalVerification", };
TestParameters parameters = new TestParameters
{
Api = API.For,
Count = 10,
Actions = actions,
Verifications = verifications,
ExpectingException = true,
WithLocalState = false,
Is64 = true,
};
ParallelStateTest test = new ParallelStateTest(parameters);
test.RealRun();
}
[Fact]
[OuterLoop]
public static void ParallelState11()
{
string[] actions = new string[] { "MultipleException", "MultipleException", "MultipleException", "MultipleException", "MultipleException", "MultipleException", "MultipleException", "MultipleException", "MultipleException", "MultipleException", };
string[] verifications = new string[] { "ExceptionalVerification", };
TestParameters parameters = new TestParameters
{
Api = API.For,
Count = 10,
Actions = actions,
Verifications = verifications,
ExpectingException = true,
WithLocalState = true,
Is64 = true,
};
ParallelStateTest test = new ParallelStateTest(parameters);
test.RealRun();
}
[Fact]
[OuterLoop]
public static void ParallelState12()
{
string[] actions = new string[] { "MultipleStop", "MultipleStop", "MultipleStop", "MultipleStop", "MultipleStop", "MultipleStop", "MultipleStop", "MultipleStop", "MultipleStop", "MultipleStop", };
string[] verifications = new string[] { "StopVerification", };
TestParameters parameters = new TestParameters
{
Api = API.For,
Count = 10,
Actions = actions,
Verifications = verifications,
ExpectingException = false,
WithLocalState = false,
Is64 = true,
};
ParallelStateTest test = new ParallelStateTest(parameters);
test.RealRun();
}
[Fact]
[OuterLoop]
public static void ParallelState13()
{
string[] actions = new string[] { "MultipleStop", "MultipleStop", "MultipleStop", "MultipleStop", "MultipleStop", "MultipleStop", "MultipleStop", "MultipleStop", "MultipleStop", "MultipleStop", };
string[] verifications = new string[] { "StopVerification", };
TestParameters parameters = new TestParameters
{
Api = API.For,
Count = 10,
Actions = actions,
Verifications = verifications,
ExpectingException = false,
WithLocalState = true,
Is64 = false,
};
ParallelStateTest test = new ParallelStateTest(parameters);
test.RealRun();
}
[Fact]
[OuterLoop]
public static void ParallelState14()
{
string[] actions = new string[] { "MultipleStop", "MultipleStop", "MultipleStop", "MultipleStop", "MultipleStop", "MultipleStop", "MultipleStop", "MultipleStop", "MultipleStop", "MultipleStop", };
string[] verifications = new string[] { "StopVerification", };
TestParameters parameters = new TestParameters
{
Api = API.For,
Count = 10,
Actions = actions,
Verifications = verifications,
ExpectingException = false,
WithLocalState = true,
Is64 = true,
};
ParallelStateTest test = new ParallelStateTest(parameters);
test.RealRun();
}
[Fact]
[OuterLoop]
public static void ParallelState15()
{
string[] actions = new string[] { "Stop", };
string[] verifications = new string[] { "StopVerification", };
TestParameters parameters = new TestParameters
{
Api = API.For,
Count = 10,
Actions = actions,
Verifications = verifications,
ExpectingException = false,
WithLocalState = false,
Is64 = false,
};
ParallelStateTest test = new ParallelStateTest(parameters);
test.RealRun();
}
[Fact]
[OuterLoop]
public static void ParallelState16()
{
string[] actions = new string[] { "Stop", };
string[] verifications = new string[] { "StopVerification", };
TestParameters parameters = new TestParameters
{
Api = API.For,
Count = 10,
Actions = actions,
Verifications = verifications,
ExpectingException = false,
WithLocalState = false,
Is64 = true,
};
ParallelStateTest test = new ParallelStateTest(parameters);
test.RealRun();
}
[Fact]
[OuterLoop]
public static void ParallelState17()
{
string[] actions = new string[] { "Stop", };
string[] verifications = new string[] { "StopVerification", };
TestParameters parameters = new TestParameters
{
Api = API.For,
Count = 10,
Actions = actions,
Verifications = verifications,
ExpectingException = false,
WithLocalState = true,
Is64 = true,
};
ParallelStateTest test = new ParallelStateTest(parameters);
test.RealRun();
}
[Fact]
[OuterLoop]
public static void ParallelState18()
{
string[] actions = new string[] { "SyncSetBreak_0", "SyncWaitStopCatchExp_1", };
string[] verifications = new string[] { "BreakVerification", };
TestParameters parameters = new TestParameters
{
Api = API.For,
Count = 10,
Actions = actions,
Verifications = verifications,
ExpectingException = false,
WithLocalState = false,
Is64 = false,
};
ParallelStateTest test = new ParallelStateTest(parameters);
test.RealRun();
}
[Fact]
[OuterLoop]
public static void ParallelState19()
{
string[] actions = new string[] { "SyncSetBreak_0", "SyncWaitStopCatchExp_1", };
string[] verifications = new string[] { "BreakVerification", };
TestParameters parameters = new TestParameters
{
Api = API.For,
Count = 10,
Actions = actions,
Verifications = verifications,
ExpectingException = false,
WithLocalState = false,
Is64 = true,
};
ParallelStateTest test = new ParallelStateTest(parameters);
test.RealRun();
}
[Fact]
[OuterLoop]
public static void ParallelState20()
{
string[] actions = new string[] { "SyncSetBreak_0", "SyncWaitStopCatchExp_1", };
string[] verifications = new string[] { "BreakVerification", };
TestParameters parameters = new TestParameters
{
Api = API.For,
Count = 10,
Actions = actions,
Verifications = verifications,
ExpectingException = false,
WithLocalState = true,
Is64 = true,
};
ParallelStateTest test = new ParallelStateTest(parameters);
test.RealRun();
}
[Fact]
[OuterLoop]
public static void ParallelState21()
{
string[] actions = new string[] { "SyncSetBreak_0", "SyncWaitStop_1", };
string[] verifications = new string[] { "ExceptionalVerification", };
TestParameters parameters = new TestParameters
{
Api = API.For,
Count = 10,
Actions = actions,
Verifications = verifications,
ExpectingException = true,
WithLocalState = false,
Is64 = true,
};
ParallelStateTest test = new ParallelStateTest(parameters);
test.RealRun();
}
[Fact]
[OuterLoop]
public static void ParallelState22()
{
string[] actions = new string[] { "SyncSetBreak_0", "SyncWaitStop_1", };
string[] verifications = new string[] { "ExceptionalVerification", };
TestParameters parameters = new TestParameters
{
Api = API.For,
Count = 10,
Actions = actions,
Verifications = verifications,
ExpectingException = true,
WithLocalState = true,
Is64 = false,
};
ParallelStateTest test = new ParallelStateTest(parameters);
test.RealRun();
}
[Fact]
[OuterLoop]
public static void ParallelState23()
{
string[] actions = new string[] { "SyncSetBreak_0", "SyncWaitStop_1", };
string[] verifications = new string[] { "ExceptionalVerification", };
TestParameters parameters = new TestParameters
{
Api = API.For,
Count = 10,
Actions = actions,
Verifications = verifications,
ExpectingException = true,
WithLocalState = true,
Is64 = true,
};
ParallelStateTest test = new ParallelStateTest(parameters);
test.RealRun();
}
[Fact]
[OuterLoop]
public static void ParallelState24()
{
string[] actions = new string[] { "SyncSetStop_0", "SyncWaitBreakCatchExp_1", };
string[] verifications = new string[] { "StopVerification", };
TestParameters parameters = new TestParameters
{
Api = API.For,
Count = 10,
Actions = actions,
Verifications = verifications,
ExpectingException = false,
WithLocalState = false,
Is64 = true,
};
ParallelStateTest test = new ParallelStateTest(parameters);
test.RealRun();
}
[Fact]
[OuterLoop]
public static void ParallelState25()
{
string[] actions = new string[] { "SyncSetStop_0", "SyncWaitBreakCatchExp_1", };
string[] verifications = new string[] { "StopVerification", };
TestParameters parameters = new TestParameters
{
Api = API.For,
Count = 10,
Actions = actions,
Verifications = verifications,
ExpectingException = false,
WithLocalState = true,
Is64 = false,
};
ParallelStateTest test = new ParallelStateTest(parameters);
test.RealRun();
}
[Fact]
[OuterLoop]
public static void ParallelState26()
{
string[] actions = new string[] { "SyncSetStop_0", "SyncWaitBreakCatchExp_1", };
string[] verifications = new string[] { "StopVerification", };
TestParameters parameters = new TestParameters
{
Api = API.For,
Count = 10,
Actions = actions,
Verifications = verifications,
ExpectingException = false,
WithLocalState = true,
Is64 = true,
};
ParallelStateTest test = new ParallelStateTest(parameters);
test.RealRun();
}
[Fact]
[OuterLoop]
public static void ParallelState27()
{
string[] actions = new string[] { "SyncSetStop_0", "SyncWaitBreak_1", };
string[] verifications = new string[] { "ExceptionalVerification", };
TestParameters parameters = new TestParameters
{
Api = API.For,
Count = 10,
Actions = actions,
Verifications = verifications,
ExpectingException = true,
WithLocalState = false,
Is64 = true,
};
ParallelStateTest test = new ParallelStateTest(parameters);
test.RealRun();
}
[Fact]
[OuterLoop]
public static void ParallelState28()
{
string[] actions = new string[] { "SyncSetStop_0", "SyncWaitBreak_1", };
string[] verifications = new string[] { "ExceptionalVerification", };
TestParameters parameters = new TestParameters
{
Api = API.For,
Count = 10,
Actions = actions,
Verifications = verifications,
ExpectingException = true,
WithLocalState = true,
Is64 = false,
};
ParallelStateTest test = new ParallelStateTest(parameters);
test.RealRun();
}
[Fact]
[OuterLoop]
public static void ParallelState29()
{
string[] actions = new string[] { "SyncSetStop_0", "SyncWaitBreak_1", };
string[] verifications = new string[] { "ExceptionalVerification", };
TestParameters parameters = new TestParameters
{
Api = API.For,
Count = 10,
Actions = actions,
Verifications = verifications,
ExpectingException = true,
WithLocalState = true,
Is64 = true,
};
ParallelStateTest test = new ParallelStateTest(parameters);
test.RealRun();
}
[Fact]
[OuterLoop]
public static void ParallelState30()
{
string[] actions = new string[] { "SyncWaitBreak_0", "SyncSetBreak_1", };
string[] verifications = new string[] { "BreakVerification", };
TestParameters parameters = new TestParameters
{
Api = API.For,
Count = 10,
Actions = actions,
Verifications = verifications,
ExpectingException = false,
WithLocalState = false,
Is64 = true,
};
ParallelStateTest test = new ParallelStateTest(parameters);
test.RealRun();
}
[Fact]
[OuterLoop]
public static void ParallelState31()
{
string[] actions = new string[] { "SyncWaitBreak_0", "SyncSetBreak_1", };
string[] verifications = new string[] { "BreakVerification", };
TestParameters parameters = new TestParameters
{
Api = API.For,
Count = 10,
Actions = actions,
Verifications = verifications,
ExpectingException = false,
WithLocalState = true,
Is64 = false,
};
ParallelStateTest test = new ParallelStateTest(parameters);
test.RealRun();
}
[Fact]
[OuterLoop]
public static void ParallelState32()
{
string[] actions = new string[] { "SyncWaitBreak_0", "SyncSetBreak_1", };
string[] verifications = new string[] { "BreakVerification", };
TestParameters parameters = new TestParameters
{
Api = API.For,
Count = 10,
Actions = actions,
Verifications = verifications,
ExpectingException = false,
WithLocalState = true,
Is64 = true,
};
ParallelStateTest test = new ParallelStateTest(parameters);
test.RealRun();
}
[Fact]
[OuterLoop]
public static void ParallelState33()
{
string[] actions = new string[] { "Break", };
string[] verifications = new string[] { "BreakVerification", };
TestParameters parameters = new TestParameters
{
Api = API.ForeachOnArray,
Count = 10,
Actions = actions,
Verifications = verifications,
ExpectingException = false,
WithLocalState = false,
Is64 = false,
};
ParallelStateTest test = new ParallelStateTest(parameters);
test.RealRun();
}
[Fact]
[OuterLoop]
public static void ParallelState34()
{
string[] actions = new string[] { "Break", };
string[] verifications = new string[] { "BreakVerification", };
TestParameters parameters = new TestParameters
{
Api = API.ForeachOnArray,
Count = 10,
Actions = actions,
Verifications = verifications,
ExpectingException = false,
WithLocalState = true,
Is64 = false,
};
ParallelStateTest test = new ParallelStateTest(parameters);
test.RealRun();
}
[Fact]
[OuterLoop]
public static void ParallelState35()
{
string[] actions = new string[] { "Exceptional", };
string[] verifications = new string[] { "ExceptionalVerification", };
TestParameters parameters = new TestParameters
{
Api = API.ForeachOnArray,
Count = 10,
Actions = actions,
Verifications = verifications,
ExpectingException = true,
WithLocalState = false,
Is64 = false,
};
ParallelStateTest test = new ParallelStateTest(parameters);
test.RealRun();
}
[Fact]
[OuterLoop]
public static void ParallelState36()
{
string[] actions = new string[] { "Exceptional", };
string[] verifications = new string[] { "ExceptionalVerification", };
TestParameters parameters = new TestParameters
{
Api = API.ForeachOnArray,
Count = 10,
Actions = actions,
Verifications = verifications,
ExpectingException = true,
WithLocalState = true,
Is64 = false,
};
ParallelStateTest test = new ParallelStateTest(parameters);
test.RealRun();
}
[Fact]
[OuterLoop]
public static void ParallelState37()
{
string[] actions = new string[] { "MultipleBreak", "MultipleBreak", "MultipleBreak", "MultipleBreak", "MultipleBreak", "MultipleBreak", "MultipleBreak", "MultipleBreak", "MultipleBreak", "MultipleBreak", };
string[] verifications = new string[] { "BreakVerification", };
TestParameters parameters = new TestParameters
{
Api = API.ForeachOnArray,
Count = 10,
Actions = actions,
Verifications = verifications,
ExpectingException = false,
WithLocalState = false,
Is64 = false,
};
ParallelStateTest test = new ParallelStateTest(parameters);
test.RealRun();
}
[Fact]
[OuterLoop]
public static void ParallelState38()
{
string[] actions = new string[] { "MultipleBreak", "MultipleBreak", "MultipleBreak", "MultipleBreak", "MultipleBreak", "MultipleBreak", "MultipleBreak", "MultipleBreak", "MultipleBreak", "MultipleBreak", };
string[] verifications = new string[] { "BreakVerification", };
TestParameters parameters = new TestParameters
{
Api = API.ForeachOnArray,
Count = 10,
Actions = actions,
Verifications = verifications,
ExpectingException = false,
WithLocalState = true,
Is64 = false,
};
ParallelStateTest test = new ParallelStateTest(parameters);
test.RealRun();
}
[Fact]
[OuterLoop]
public static void ParallelState39()
{
string[] actions = new string[] { "MultipleException", "MultipleException", "MultipleException", "MultipleException", "MultipleException", "MultipleException", "MultipleException", "MultipleException", "MultipleException", "MultipleException", };
string[] verifications = new string[] { "ExceptionalVerification", };
TestParameters parameters = new TestParameters
{
Api = API.ForeachOnArray,
Count = 10,
Actions = actions,
Verifications = verifications,
ExpectingException = true,
WithLocalState = false,
Is64 = false,
};
ParallelStateTest test = new ParallelStateTest(parameters);
test.RealRun();
}
[Fact]
[OuterLoop]
public static void ParallelState40()
{
string[] actions = new string[] { "MultipleException", "MultipleException", "MultipleException", "MultipleException", "MultipleException", "MultipleException", "MultipleException", "MultipleException", "MultipleException", "MultipleException", };
string[] verifications = new string[] { "ExceptionalVerification", };
TestParameters parameters = new TestParameters
{
Api = API.ForeachOnArray,
Count = 10,
Actions = actions,
Verifications = verifications,
ExpectingException = true,
WithLocalState = true,
Is64 = false,
};
ParallelStateTest test = new ParallelStateTest(parameters);
test.RealRun();
}
[Fact]
[OuterLoop]
public static void ParallelState41()
{
string[] actions = new string[] { "MultipleStop", "MultipleStop", "MultipleStop", "MultipleStop", "MultipleStop", "MultipleStop", "MultipleStop", "MultipleStop", "MultipleStop", "MultipleStop", };
string[] verifications = new string[] { "StopVerification", };
TestParameters parameters = new TestParameters
{
Api = API.ForeachOnArray,
Count = 10,
Actions = actions,
Verifications = verifications,
ExpectingException = false,
WithLocalState = false,
Is64 = false,
};
ParallelStateTest test = new ParallelStateTest(parameters);
test.RealRun();
}
[Fact]
[OuterLoop]
public static void ParallelState42()
{
string[] actions = new string[] { "MultipleStop", "MultipleStop", "MultipleStop", "MultipleStop", "MultipleStop", "MultipleStop", "MultipleStop", "MultipleStop", "MultipleStop", "MultipleStop", };
string[] verifications = new string[] { "StopVerification", };
TestParameters parameters = new TestParameters
{
Api = API.ForeachOnArray,
Count = 10,
Actions = actions,
Verifications = verifications,
ExpectingException = false,
WithLocalState = true,
Is64 = false,
};
ParallelStateTest test = new ParallelStateTest(parameters);
test.RealRun();
}
[Fact]
[OuterLoop]
public static void ParallelState43()
{
string[] actions = new string[] { "Stop", };
string[] verifications = new string[] { "StopVerification", };
TestParameters parameters = new TestParameters
{
Api = API.ForeachOnArray,
Count = 10,
Actions = actions,
Verifications = verifications,
ExpectingException = false,
WithLocalState = false,
Is64 = false,
};
ParallelStateTest test = new ParallelStateTest(parameters);
test.RealRun();
}
[Fact]
[OuterLoop]
public static void ParallelState44()
{
string[] actions = new string[] { "Stop", };
string[] verifications = new string[] { "StopVerification", };
TestParameters parameters = new TestParameters
{
Api = API.ForeachOnArray,
Count = 10,
Actions = actions,
Verifications = verifications,
ExpectingException = false,
WithLocalState = true,
Is64 = false,
};
ParallelStateTest test = new ParallelStateTest(parameters);
test.RealRun();
}
[Fact]
[OuterLoop]
public static void ParallelState45()
{
string[] actions = new string[] { "SyncSetBreak_0", "SyncWaitStopCatchExp_1", };
string[] verifications = new string[] { "BreakVerification", };
TestParameters parameters = new TestParameters
{
Api = API.ForeachOnArray,
Count = 10,
Actions = actions,
Verifications = verifications,
ExpectingException = false,
WithLocalState = false,
Is64 = false,
};
ParallelStateTest test = new ParallelStateTest(parameters);
test.RealRun();
}
[Fact]
[OuterLoop]
public static void ParallelState46()
{
string[] actions = new string[] { "SyncSetBreak_0", "SyncWaitStopCatchExp_1", };
string[] verifications = new string[] { "BreakVerification", };
TestParameters parameters = new TestParameters
{
Api = API.ForeachOnArray,
Count = 10,
Actions = actions,
Verifications = verifications,
ExpectingException = false,
WithLocalState = true,
Is64 = false,
};
ParallelStateTest test = new ParallelStateTest(parameters);
test.RealRun();
}
[Fact]
[OuterLoop]
public static void ParallelState47()
{
string[] actions = new string[] { "SyncSetBreak_0", "SyncWaitStop_1", };
string[] verifications = new string[] { "ExceptionalVerification", };
TestParameters parameters = new TestParameters
{
Api = API.ForeachOnArray,
Count = 10,
Actions = actions,
Verifications = verifications,
ExpectingException = true,
WithLocalState = false,
Is64 = false,
};
ParallelStateTest test = new ParallelStateTest(parameters);
test.RealRun();
}
[Fact]
[OuterLoop]
public static void ParallelState48()
{
string[] actions = new string[] { "SyncSetBreak_0", "SyncWaitStop_1", };
string[] verifications = new string[] { "ExceptionalVerification", };
TestParameters parameters = new TestParameters
{
Api = API.ForeachOnArray,
Count = 10,
Actions = actions,
Verifications = verifications,
ExpectingException = true,
WithLocalState = true,
Is64 = false,
};
ParallelStateTest test = new ParallelStateTest(parameters);
test.RealRun();
}
[Fact]
[OuterLoop]
public static void ParallelState49()
{
string[] actions = new string[] { "SyncSetStop_0", "SyncWaitBreakCatchExp_1", };
string[] verifications = new string[] { "StopVerification", };
TestParameters parameters = new TestParameters
{
Api = API.ForeachOnArray,
Count = 10,
Actions = actions,
Verifications = verifications,
ExpectingException = false,
WithLocalState = false,
Is64 = false,
};
ParallelStateTest test = new ParallelStateTest(parameters);
test.RealRun();
}
[Fact]
[OuterLoop]
public static void ParallelState50()
{
string[] actions = new string[] { "SyncSetStop_0", "SyncWaitBreakCatchExp_1", };
string[] verifications = new string[] { "StopVerification", };
TestParameters parameters = new TestParameters
{
Api = API.ForeachOnArray,
Count = 10,
Actions = actions,
Verifications = verifications,
ExpectingException = false,
WithLocalState = true,
Is64 = false,
};
ParallelStateTest test = new ParallelStateTest(parameters);
test.RealRun();
}
[Fact]
[OuterLoop]
public static void ParallelState51()
{
string[] actions = new string[] { "SyncSetStop_0", "SyncWaitBreak_1", };
string[] verifications = new string[] { "ExceptionalVerification", };
TestParameters parameters = new TestParameters
{
Api = API.ForeachOnArray,
Count = 10,
Actions = actions,
Verifications = verifications,
ExpectingException = true,
WithLocalState = false,
Is64 = false,
};
ParallelStateTest test = new ParallelStateTest(parameters);
test.RealRun();
}
[Fact]
[OuterLoop]
public static void ParallelState52()
{
string[] actions = new string[] { "SyncSetStop_0", "SyncWaitBreak_1", };
string[] verifications = new string[] { "ExceptionalVerification", };
TestParameters parameters = new TestParameters
{
Api = API.ForeachOnArray,
Count = 10,
Actions = actions,
Verifications = verifications,
ExpectingException = true,
WithLocalState = true,
Is64 = false,
};
ParallelStateTest test = new ParallelStateTest(parameters);
test.RealRun();
}
[Fact]
[OuterLoop]
public static void ParallelState53()
{
string[] actions = new string[] { "SyncWaitBreak_0", "SyncSetBreak_1", };
string[] verifications = new string[] { "BreakVerification", };
TestParameters parameters = new TestParameters
{
Api = API.ForeachOnArray,
Count = 10,
Actions = actions,
Verifications = verifications,
ExpectingException = false,
WithLocalState = false,
Is64 = false,
};
ParallelStateTest test = new ParallelStateTest(parameters);
test.RealRun();
}
[Fact]
[OuterLoop]
public static void ParallelState54()
{
string[] actions = new string[] { "SyncWaitBreak_0", "SyncSetBreak_1", };
string[] verifications = new string[] { "BreakVerification", };
TestParameters parameters = new TestParameters
{
Api = API.ForeachOnArray,
Count = 10,
Actions = actions,
Verifications = verifications,
ExpectingException = false,
WithLocalState = true,
Is64 = false,
};
ParallelStateTest test = new ParallelStateTest(parameters);
test.RealRun();
}
[Fact]
[OuterLoop]
public static void ParallelState55()
{
string[] actions = new string[] { "Break", };
string[] verifications = new string[] { "BreakVerification", };
TestParameters parameters = new TestParameters
{
Api = API.ForeachOnList,
Count = 10,
Actions = actions,
Verifications = verifications,
ExpectingException = false,
WithLocalState = false,
Is64 = false,
};
ParallelStateTest test = new ParallelStateTest(parameters);
test.RealRun();
}
[Fact]
[OuterLoop]
public static void ParallelState56()
{
string[] actions = new string[] { "Break", };
string[] verifications = new string[] { "BreakVerification", };
TestParameters parameters = new TestParameters
{
Api = API.ForeachOnList,
Count = 10,
Actions = actions,
Verifications = verifications,
ExpectingException = false,
WithLocalState = true,
Is64 = false,
};
ParallelStateTest test = new ParallelStateTest(parameters);
test.RealRun();
}
[Fact]
[OuterLoop]
public static void ParallelState57()
{
string[] actions = new string[] { "Exceptional", };
string[] verifications = new string[] { "ExceptionalVerification", };
TestParameters parameters = new TestParameters
{
Api = API.ForeachOnList,
Count = 10,
Actions = actions,
Verifications = verifications,
ExpectingException = true,
WithLocalState = false,
Is64 = false,
};
ParallelStateTest test = new ParallelStateTest(parameters);
test.RealRun();
}
[Fact]
[OuterLoop]
public static void ParallelState58()
{
string[] actions = new string[] { "Exceptional", };
string[] verifications = new string[] { "ExceptionalVerification", };
TestParameters parameters = new TestParameters
{
Api = API.ForeachOnList,
Count = 10,
Actions = actions,
Verifications = verifications,
ExpectingException = true,
WithLocalState = true,
Is64 = false,
};
ParallelStateTest test = new ParallelStateTest(parameters);
test.RealRun();
}
[Fact]
[OuterLoop]
public static void ParallelState59()
{
string[] actions = new string[] { "MultipleBreak", "MultipleBreak", "MultipleBreak", "MultipleBreak", "MultipleBreak", "MultipleBreak", "MultipleBreak", "MultipleBreak", "MultipleBreak", "MultipleBreak", };
string[] verifications = new string[] { "BreakVerification", };
TestParameters parameters = new TestParameters
{
Api = API.ForeachOnList,
Count = 10,
Actions = actions,
Verifications = verifications,
ExpectingException = false,
WithLocalState = false,
Is64 = false,
};
ParallelStateTest test = new ParallelStateTest(parameters);
test.RealRun();
}
[Fact]
[OuterLoop]
public static void ParallelState60()
{
string[] actions = new string[] { "MultipleBreak", "MultipleBreak", "MultipleBreak", "MultipleBreak", "MultipleBreak", "MultipleBreak", "MultipleBreak", "MultipleBreak", "MultipleBreak", "MultipleBreak", };
string[] verifications = new string[] { "BreakVerification", };
TestParameters parameters = new TestParameters
{
Api = API.ForeachOnList,
Count = 10,
Actions = actions,
Verifications = verifications,
ExpectingException = false,
WithLocalState = true,
Is64 = false,
};
ParallelStateTest test = new ParallelStateTest(parameters);
test.RealRun();
}
[Fact]
[OuterLoop]
public static void ParallelState61()
{
string[] actions = new string[] { "MultipleException", "MultipleException", "MultipleException", "MultipleException", "MultipleException", "MultipleException", "MultipleException", "MultipleException", "MultipleException", "MultipleException", };
string[] verifications = new string[] { "ExceptionalVerification", };
TestParameters parameters = new TestParameters
{
Api = API.ForeachOnList,
Count = 10,
Actions = actions,
Verifications = verifications,
ExpectingException = true,
WithLocalState = false,
Is64 = false,
};
ParallelStateTest test = new ParallelStateTest(parameters);
test.RealRun();
}
[Fact]
[OuterLoop]
public static void ParallelState62()
{
string[] actions = new string[] { "MultipleException", "MultipleException", "MultipleException", "MultipleException", "MultipleException", "MultipleException", "MultipleException", "MultipleException", "MultipleException", "MultipleException", };
string[] verifications = new string[] { "ExceptionalVerification", };
TestParameters parameters = new TestParameters
{
Api = API.ForeachOnList,
Count = 10,
Actions = actions,
Verifications = verifications,
ExpectingException = true,
WithLocalState = true,
Is64 = false,
};
ParallelStateTest test = new ParallelStateTest(parameters);
test.RealRun();
}
[Fact]
[OuterLoop]
public static void ParallelState63()
{
string[] actions = new string[] { "MultipleStop", "MultipleStop", "MultipleStop", "MultipleStop", "MultipleStop", "MultipleStop", "MultipleStop", "MultipleStop", "MultipleStop", "MultipleStop", };
string[] verifications = new string[] { "StopVerification", };
TestParameters parameters = new TestParameters
{
Api = API.ForeachOnList,
Count = 10,
Actions = actions,
Verifications = verifications,
ExpectingException = false,
WithLocalState = false,
Is64 = false,
};
ParallelStateTest test = new ParallelStateTest(parameters);
test.RealRun();
}
[Fact]
[OuterLoop]
public static void ParallelState64()
{
string[] actions = new string[] { "MultipleStop", "MultipleStop", "MultipleStop", "MultipleStop", "MultipleStop", "MultipleStop", "MultipleStop", "MultipleStop", "MultipleStop", "MultipleStop", };
string[] verifications = new string[] { "StopVerification", };
TestParameters parameters = new TestParameters
{
Api = API.ForeachOnList,
Count = 10,
Actions = actions,
Verifications = verifications,
ExpectingException = false,
WithLocalState = true,
Is64 = false,
};
ParallelStateTest test = new ParallelStateTest(parameters);
test.RealRun();
}
[Fact]
[OuterLoop]
public static void ParallelState65()
{
string[] actions = new string[] { "Stop", };
string[] verifications = new string[] { "StopVerification", };
TestParameters parameters = new TestParameters
{
Api = API.ForeachOnList,
Count = 10,
Actions = actions,
Verifications = verifications,
ExpectingException = false,
WithLocalState = false,
Is64 = false,
};
ParallelStateTest test = new ParallelStateTest(parameters);
test.RealRun();
}
[Fact]
[OuterLoop]
public static void ParallelState66()
{
string[] actions = new string[] { "Stop", };
string[] verifications = new string[] { "StopVerification", };
TestParameters parameters = new TestParameters
{
Api = API.ForeachOnList,
Count = 10,
Actions = actions,
Verifications = verifications,
ExpectingException = false,
WithLocalState = true,
Is64 = false,
};
ParallelStateTest test = new ParallelStateTest(parameters);
test.RealRun();
}
[Fact]
[OuterLoop]
public static void ParallelState67()
{
string[] actions = new string[] { "SyncSetBreak_0", "SyncWaitStopCatchExp_1", };
string[] verifications = new string[] { "BreakVerification", };
TestParameters parameters = new TestParameters
{
Api = API.ForeachOnList,
Count = 10,
Actions = actions,
Verifications = verifications,
ExpectingException = false,
WithLocalState = false,
Is64 = false,
};
ParallelStateTest test = new ParallelStateTest(parameters);
test.RealRun();
}
[Fact]
[OuterLoop]
public static void ParallelState68()
{
string[] actions = new string[] { "SyncSetBreak_0", "SyncWaitStopCatchExp_1", };
string[] verifications = new string[] { "BreakVerification", };
TestParameters parameters = new TestParameters
{
Api = API.ForeachOnList,
Count = 10,
Actions = actions,
Verifications = verifications,
ExpectingException = false,
WithLocalState = true,
Is64 = false,
};
ParallelStateTest test = new ParallelStateTest(parameters);
test.RealRun();
}
[Fact]
[OuterLoop]
public static void ParallelState69()
{
string[] actions = new string[] { "SyncSetBreak_0", "SyncWaitStop_1", };
string[] verifications = new string[] { "ExceptionalVerification", };
TestParameters parameters = new TestParameters
{
Api = API.ForeachOnList,
Count = 10,
Actions = actions,
Verifications = verifications,
ExpectingException = true,
WithLocalState = false,
Is64 = false,
};
ParallelStateTest test = new ParallelStateTest(parameters);
test.RealRun();
}
[Fact]
[OuterLoop]
public static void ParallelState70()
{
string[] actions = new string[] { "SyncSetBreak_0", "SyncWaitStop_1", };
string[] verifications = new string[] { "ExceptionalVerification", };
TestParameters parameters = new TestParameters
{
Api = API.ForeachOnList,
Count = 10,
Actions = actions,
Verifications = verifications,
ExpectingException = true,
WithLocalState = true,
Is64 = false,
};
ParallelStateTest test = new ParallelStateTest(parameters);
test.RealRun();
}
[Fact]
[OuterLoop]
public static void ParallelState71()
{
string[] actions = new string[] { "SyncSetStop_0", "SyncWaitBreakCatchExp_1", };
string[] verifications = new string[] { "StopVerification", };
TestParameters parameters = new TestParameters
{
Api = API.ForeachOnList,
Count = 10,
Actions = actions,
Verifications = verifications,
ExpectingException = false,
WithLocalState = false,
Is64 = false,
};
ParallelStateTest test = new ParallelStateTest(parameters);
test.RealRun();
}
[Fact]
[OuterLoop]
public static void ParallelState72()
{
string[] actions = new string[] { "SyncSetStop_0", "SyncWaitBreakCatchExp_1", };
string[] verifications = new string[] { "StopVerification", };
TestParameters parameters = new TestParameters
{
Api = API.ForeachOnList,
Count = 10,
Actions = actions,
Verifications = verifications,
ExpectingException = false,
WithLocalState = true,
Is64 = false,
};
ParallelStateTest test = new ParallelStateTest(parameters);
test.RealRun();
}
[Fact]
[OuterLoop]
public static void ParallelState73()
{
string[] actions = new string[] { "SyncSetStop_0", "SyncWaitBreak_1", };
string[] verifications = new string[] { "ExceptionalVerification", };
TestParameters parameters = new TestParameters
{
Api = API.ForeachOnList,
Count = 10,
Actions = actions,
Verifications = verifications,
ExpectingException = true,
WithLocalState = false,
Is64 = false,
};
ParallelStateTest test = new ParallelStateTest(parameters);
test.RealRun();
}
[Fact]
[OuterLoop]
public static void ParallelState74()
{
string[] actions = new string[] { "SyncSetStop_0", "SyncWaitBreak_1", };
string[] verifications = new string[] { "ExceptionalVerification", };
TestParameters parameters = new TestParameters
{
Api = API.ForeachOnList,
Count = 10,
Actions = actions,
Verifications = verifications,
ExpectingException = true,
WithLocalState = true,
Is64 = false,
};
ParallelStateTest test = new ParallelStateTest(parameters);
test.RealRun();
}
[Fact]
[OuterLoop]
public static void ParallelState75()
{
string[] actions = new string[] { "SyncWaitBreak_0", "SyncSetBreak_1", };
string[] verifications = new string[] { "BreakVerification", };
TestParameters parameters = new TestParameters
{
Api = API.ForeachOnList,
Count = 10,
Actions = actions,
Verifications = verifications,
ExpectingException = false,
WithLocalState = false,
Is64 = false,
};
ParallelStateTest test = new ParallelStateTest(parameters);
test.RealRun();
}
[Fact]
[OuterLoop]
public static void ParallelState76()
{
string[] actions = new string[] { "SyncWaitBreak_0", "SyncSetBreak_1", };
string[] verifications = new string[] { "BreakVerification", };
TestParameters parameters = new TestParameters
{
Api = API.ForeachOnList,
Count = 10,
Actions = actions,
Verifications = verifications,
ExpectingException = false,
WithLocalState = true,
Is64 = false,
};
ParallelStateTest test = new ParallelStateTest(parameters);
test.RealRun();
}
}
}
// Licensed to the .NET Foundation under one or more agreements.
// The .NET Foundation licenses this file to you under the MIT license.
//
// =+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+
//
// This file contains functional tests for ParallelLoopState
//
// =-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=--=-=-=-=-=-=-=-=-=-=-=-
using System.Collections.Generic;
using System.Diagnostics;
using System.Reflection;
using Xunit;
namespace System.Threading.Tasks.Test
{
public sealed class ParallelStateTest
{
#region Private Fields
private readonly object _lock = new object();
private readonly IEnumerable<int> _collection = null; // the collection used in Foreach
private readonly Barrier _barrier;
// Holds list of available actions
private readonly Dictionary<string, Action<long, ParallelLoopState>> _availableActions = new Dictionary<string, Action<long, ParallelLoopState>>();
private readonly Dictionary<string, Action<ParallelLoopResult?>> _availableVerifications = new Dictionary<string, Action<ParallelLoopResult?>>();
private readonly TestParameters _parameters;
private readonly ManualResetEventSlim _mreSlim;
private readonly double[] _results; // global place to store the workload result for verification
// data structure used with ParallelLoopState<TLocal>
// each row is the sequence of loop "index" finished in the same thread
// private Dictionary<int, List<int>> sequences;
private long _threadCount;
private readonly List<int>[] _sequences;
private readonly List<long>[] _sequences64;
private long _startIndex = 0; // start index for the loop
// Hold list of actions to be performed
private List<Action<long, ParallelLoopState>> _actions = new List<Action<long, ParallelLoopState>>();
// Holds the list of verifications
private Queue<Action<ParallelLoopResult?>> _verifications = new Queue<Action<ParallelLoopResult?>>();
private volatile bool _isStopped = false; // Flag to indicate that we called Stop() on the Parallel state
private long? _lowestBreakIter = null; // LowestBreakIteration value holder, null indicates that Break hasn't been called
private volatile bool _isExceptional = false; // Flag to indicate exception thrown in the test
private int _iterCount = 0; // the test's own counter for certain scenarios, so the test can change behavior after a certain number of loop iterations
#endregion
#region Constructor
public ParallelStateTest(TestParameters parameters)
{
_parameters = parameters;
_mreSlim = new ManualResetEventSlim(false);
_results = new double[parameters.Count];
_sequences = new List<int>[1024];
_sequences64 = new List<long>[1024];
_threadCount = 0;
// Set available actions
_availableActions["Stop"] = StopAction;
_availableActions["Break"] = BreakAction;
_availableActions["Exceptional"] = ExceptionalAction;
_availableActions["MultipleStop"] = MultipleStopAction;
_availableActions["MultipleBreak"] = MultipleBreakAction;
_availableActions["MultipleException"] = MultipleExceptionAction;
_availableActions["SyncWaitStop"] = SyncWaitStop;
_availableActions["SyncSetStop"] = SyncSetStop;
_availableActions["SyncWaitBreak"] = SyncWaitBreak;
_availableActions["SyncSetBreak"] = SyncSetBreak;
_availableActions["SyncWaitStopCatchExp"] = SyncWaitStopCatchExp;
_availableActions["SyncWaitBreakCatchExp"] = SyncWaitBreakCatchExp;
_availableActions["SyncWaitExceptional"] = SyncWaitExceptional;
_availableActions["SyncSetExceptional"] = SyncSetExceptional;
// Set available verifications
_availableVerifications["StopVerification"] = StopVerification;
_availableVerifications["BreakVerification"] = BreakVerification;
_availableVerifications["ExceptionalVerification"] = ExceptionalVerification;
_barrier = new Barrier(parameters.Count);
// A barrier is used in the workload to ensure that all tasks are running before any proceed.
// This causes delays if the count is higher than the number of processors, as the thread pool
// will need to (slowly) inject additional threads to meet the demand. As a less-than-ideal
// workaround, we change the thread pool's min thread count to be at least the number required
// for the test. Not perfect, but better than nothing.
ThreadPoolHelpers.EnsureMinThreadsAtLeast(parameters.Count);
int length = parameters.Count;
if (length < 0)
length = 0;
if (parameters.Api != API.For)
{
int[] collArray = new int[length];
for (int j = 0; j < length; j++)
collArray[j] = ((int)_startIndex) + j;
if (parameters.Api == API.ForeachOnArray)
_collection = collArray;
else if (parameters.Api == API.ForeachOnList)
_collection = new List<int>(collArray);
else
_collection = collArray;
}
int index = 0;
for (index = 0; index < parameters.Count; index++)
_actions.Add(DummyAction);
index = 0;
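// Action names may carry an explicit iteration index as a suffix (e.g. "SyncSetStop_0");
// otherwise actions are assigned to iterations in the order they appear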
foreach (string action in parameters.Actions)
{
Action<long, ParallelLoopState> a = null;
string[] actionIndexPair = action.Split('_');
if (!_availableActions.TryGetValue(actionIndexPair[0], out a))
throw new ArgumentException(actionIndexPair[0] + " is not a valid action");
_actions[actionIndexPair.Length > 1 ? int.Parse(actionIndexPair[1]) : index++] = a;
}
foreach (string verification in parameters.Verifications)
{
Action<ParallelLoopResult?> act = null;
if (!_availableVerifications.TryGetValue(verification, out act))
throw new ArgumentException(verification + " is not a valid verification");
_verifications.Enqueue(act);
}
}
#endregion
internal void RealRun()
{
ParallelLoopResult? loopResult = null;
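// Run the requested Parallel API with the configured parameters; exceptions thrown from loop
// bodies surface as an AggregateException, which is unwrapped and validated below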
try
{
if (!_parameters.Is64)
{
if (_parameters.Api == API.For)
{
if (_parameters.WithLocalState)
{
// call Parallel.For with step and ParallelLoopState<TLocal>, plus threadLocalFinally
loopResult = Parallel.For<List<int>>((int)_startIndex, (int)_startIndex + _parameters.Count, ThreadLocalInit, WorkWithLocalState, ThreadLocalFinally);
}
else
{
loopResult = Parallel.For((int)_startIndex, (int)_startIndex + _parameters.Count, WorkWithNoLocalState);
}
}
else
{
if (_parameters.WithLocalState)
{
// call Parallel.ForEach<TSource, TLocal> with thread-local state, plus threadLocalFinally
loopResult = Parallel.ForEach<int, List<int>>(_collection, ThreadLocalInit, WorkWithLocalState, ThreadLocalFinally);
}
else
{
loopResult = Parallel.ForEach<int>(_collection, WorkWithNoLocalState);
}
}
}
else
{
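// The 64-bit tests start the range at int.MaxValue so that indices exceed the int range.
// Only Parallel.For offers long-range overloads, so the Foreach APIs are not exercised here.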
_startIndex = int.MaxValue;
if (_parameters.Api == API.For)
{
if (_parameters.WithLocalState)
{
// call Parallel.For<TLocal> with thread-local state, plus threadLocalFinally
loopResult = Parallel.For<List<long>>(_startIndex, _startIndex + _parameters.Count, ThreadLocalInit64, WorkWithLocalState, ThreadLocalFinally64);
}
else
{
loopResult = Parallel.For(_startIndex, _startIndex + _parameters.Count, WorkWithNoLocalState);
}
}
}
Assert.False(_parameters.ExpectingException, "InvalidOperationException was not thrown when one was expected");
}
catch (AggregateException exp)
{
if (_parameters.ExpectingException)
Assert.IsType<InvalidOperationException>(exp.Flatten().InnerException);
}
// If the config file specified what verifications to use for this test, verify result
while (_verifications.Count > 0)
{
_verifications.Dequeue().Invoke(loopResult);
}
}
#region Workloads
// Workload for Parallel.For / Foreach
private void Work(long i)
{
//
// Make sure all tasks are spawned before moving on
//
_barrier.SignalAndWait();
if (_results[i - _startIndex] == 0)
_results[i - _startIndex] = ZetaSequence((int)(i - _startIndex) + 1000);
else
_results[i - _startIndex] = double.MinValue; //same index should not be processed twice
}
// Workload for Parallel.For / Foreach with ParallelLoopState but no thread-local state
private void WorkWithNoLocalState(int i, ParallelLoopState state)
{
Debug.WriteLine("WorkWithNoLocalState(int) on index {0}, StartIndex: {1}, real index {2}", i, _startIndex, i - _startIndex);
Work(i);
_actions[i].Invoke(i, state);
}
// Workload for Parallel.For / Foreach with ParallelLoopState and thread-local state
private List<int> WorkWithLocalState(int i, ParallelLoopState state, List<int> threadLocalValue)
{
Debug.WriteLine("WorkWithLocalState(int) on index {0}, StartIndex: {1}, real index {2}", i, _startIndex, i - _startIndex);
Work(i);
threadLocalValue.Add(i + (int)_startIndex);
_actions[i].Invoke(i, state);
return threadLocalValue;
}
// Workload for Parallel.For / Foreach with index, ParallelLoopState and thread-local state
private List<int> WorkWithLocalState(int i, int index, ParallelLoopState state, List<int> threadLocalValue)
{
Debug.WriteLine("WorkWithLocalState(int, index) on index {0}, StartIndex: {1}, real index {2}", i, _startIndex, i - _startIndex);
Work(i);
threadLocalValue.Add(index + (int)_startIndex);
_actions[index].Invoke(index, state);
return threadLocalValue;
}
// Workload for Parallel.For with long range
private void WorkWithNoLocalState(long i, ParallelLoopState state)
{
Debug.WriteLine("WorkWithNoLocalState(long) on index {0}, StartIndex: {1}, real index {2}", i, _startIndex, i - _startIndex);
Work(i);
_actions[(int)(i - _startIndex)].Invoke(i, state);
}
// Workload for Parallel.For with long range
private List<long> WorkWithLocalState(long i, ParallelLoopState state, List<long> threadLocalValue)
{
Debug.WriteLine("WorkWithLocalState(long) on index {0}, StartIndex: {1}, real index {2}", i, _startIndex, i - _startIndex);
Work(i);
threadLocalValue.Add(i + _startIndex);
_actions[(int)(i - _startIndex)].Invoke(i, state);
return threadLocalValue;
}
/// <summary>
/// This action waits for the other iteration to call Stop and
/// set the MRE when it is done. Once the MRE is set, this function
/// calls Break which results in an InvalidOperationException
/// </summary>
/// <param name="i"></param>
/// <param name="state"></param>
private void SyncWaitBreakCatchExp(long i, ParallelLoopState state)
{
_mreSlim.Wait();
BreakActionHelper(i, state, true);
}
/// <summary>
/// This action waits for the other iteration to call Break and
/// set the MRE when it is done. Once the MRE is set, this function
/// calls Stop which results in an InvalidOperationException
/// </summary>
/// <param name="i"></param>
/// <param name="state"></param>
private void SyncWaitStopCatchExp(long i, ParallelLoopState state)
{
_mreSlim.Wait();
StopActionHelper(i, state, true);
}
/// <summary>
/// This action waits for the other iteration to call Break and
/// set the MRE when it is done. Once the MRE is set, this function
/// calls Break which results in the lower iteration winning
/// </summary>
/// <param name="i"></param>
/// <param name="state"></param>
private void SyncWaitBreak(long i, ParallelLoopState state)
{
//Logger.LogInformation("Calling SyncWaitBreakAction on index {0}, StartIndex: {1}, real index {2}", i, StartIndex, i - StartIndex);
_mreSlim.Wait();
BreakAction(i, state);
}
/// <summary>
/// This action calls Break and notifies other iterations
/// by setting the shared MRE
/// </summary>
/// <param name="i"></param>
/// <param name="state"></param>
private void SyncSetBreak(long i, ParallelLoopState state)
{
//Logger.LogInformation("Calling SyncSetBreakAction on index {0}, StartIndex: {1}, real index {2}", i, StartIndex, i - StartIndex);
// Sleep briefly to reduce the race with the next action
Task delay = Task.Delay(10);
delay.Wait();
BreakAction(i, state);
_mreSlim.Set();
}
/// <summary>
/// This function waits for another iteration to call Stop
/// and then set the shared MRE to notify when it is done
/// </summary>
/// <param name="i"></param>
/// <param name="state"></param>
private void SyncWaitStop(long i, ParallelLoopState state)
{
//Logger.LogInformation("Calling SyncWaitStopAction on index {0}, StartIndex: {1}, real index {2}", i, StartIndex, i - StartIndex);
_mreSlim.Wait();
StopAction(i, state);
}
/// <summary>
/// This action calls Stop and notifies the other iteration by setting the MRE
/// </summary>
/// <param name="i"></param>
/// <param name="state"></param>
private void SyncSetStop(long i, ParallelLoopState state)
{
//Logger.LogInformation("Calling SyncSetStopAction on index {0}, StartIndex: {1}, real index {2}", i, StartIndex, i - StartIndex);
// Sleep briefly to reduce the race with the next action
Task delay = Task.Delay(10);
delay.Wait();
StopAction(i, state);
_mreSlim.Set();
}
/// <summary>
/// This action waits for another iteration to throw an exception and notify
/// when it is done by setting the MRE
/// </summary>
/// <param name="i"></param>
/// <param name="state"></param>
private void SyncWaitExceptional(long i, ParallelLoopState state)
{
_mreSlim.Wait();
ExceptionalAction(i, state);
}
/// <summary>
/// This action throws an exception and notifies the rest of the iterations
/// by setting a shared MRE
/// </summary>
/// <param name="i"></param>
/// <param name="state"></param>
private void SyncSetExceptional(long i, ParallelLoopState state)
{
// Sleep briefly to reduce the race with the next action
Task delay = Task.Delay(10);
delay.Wait();
ExceptionalAction(i, state);
_mreSlim.Set();
}
/// <summary>
/// This action is a NOP - does nothing
/// </summary>
/// <param name="i"></param>
/// <param name="state"></param>
private void DummyAction(long i, ParallelLoopState state)
{
}
/// <summary>
/// This action calls Stop on the current iteration. Note that this is called by only one iteration in the loop
/// </summary>
/// <param name="i"></param>
/// <param name="state"></param>
private void StopAction(long i, ParallelLoopState state)
{
StopActionHelper(i, state, false);
}
/// <summary>
/// Calls Break for the current Iteration. Note that this is called by only one iteration in the loop
/// </summary>
/// <param name="i"></param>
/// <param name="state"></param>
private void BreakAction(long i, ParallelLoopState state)
{
BreakActionHelper(i, state, false);
}
/// <summary>
/// Note!! This function is not thread safe and care must be taken so that it is not called concurrently
///
/// Helper function that calls Stop for the current iteration and sets the test flag (_isStopped) to true
///
/// 1) If stop was already called, check if ParallelLoopState-->IsStopped is true
/// 2) If stop was already called, check if ParallelLoopState-->ShouldExitCurrentIteration is true
/// </summary>
/// <param name="i"></param>
/// <param name="state"></param>
/// <param name="catchException"></param>
private void StopActionHelper(long i, ParallelLoopState state, bool catchException)
{
Debug.WriteLine("Calling StopAction on index: {0}, StartIndex: {1}, real index {2}", i, _startIndex, i - _startIndex);
// We already called Stop() on the Parallel state
Assert.False(_isStopped && _isStopped != state.IsStopped, string.Format("Expecting IsStopped to be true for iteration {0}", i));
// If we previously called Stop() on the parallel state,
// we expect all iterations to see the state's ShouldExitCurrentIteration as true
Assert.False(_isStopped && !state.ShouldExitCurrentIteration, string.Format("Expecting ShouldExitCurrentIteration to be true for iteration {0}", i));
try
{
state.Stop();
_isStopped = true;
// If Stop is called after a Break was called then an InvalidOperationException is expected
Assert.False(catchException, "Not getting InvalidOperationException from Stop() when expecting one");
}
// If Stop is called after a Break was called then an InvalidOperationException is expected
catch (InvalidOperationException) when (catchException)
{
}
}
/// <summary>
/// Thread safe version of the Stop action. This can safely be invoked concurrently
///
/// Stops the loop for the first parameters.Count/2 iterations and sets the test flag (_isStopped) to indicate this
///
/// 1) If Stop was previously called, then check that ParallelLoopState-->IsStopped is set to true
/// 2) If Stop was previously called, then check that ParallelLoopState-->ShouldExitCurrentIteration is true
/// </summary>
/// <param name="i"></param>
/// <param name="state"></param>
/// <param name="catchException"></param>
private void MultipleStopAction(long i, ParallelLoopState state)
{
if (Interlocked.Increment(ref _iterCount) < _parameters.Count / 2)
{
state.Stop();
_isStopped = true;
}
else
{
// We already called Stop() on the Parallel state
Assert.False(_isStopped && !state.IsStopped, string.Format("Expecting IsStopped to be true for iteration {0}", i));
// If we previously called Stop() on the parallel state,
// we expect all iterations see the state's ShouldExitCurrentIteration to be true
Assert.False(_isStopped && !state.ShouldExitCurrentIteration, string.Format("Expecting ShouldExitCurrentIteration to be true for iteration {0}", i));
}
}
/// <summary>
/// NOTE!! This function is not thread safe and must not be called concurrently
///
/// Helper function that calls Break for the current iteration if
/// 1) Break has never been called so far
/// 2) if the current iteration is smaller than the iteration for which Break was previously called
///
/// If Break was already called then check that
/// 1) The lowest break iteration stored by us is the same as the one passed in State
/// 2) If this iteration is greater than the lowest break iteration, then shouldExitCurrentIteration should be true
/// 3) if this iteration is lower than the lowest break iteration then shouldExitCurrentIteration should be false
/// </summary>
/// <param name="i">current iteration </param>
/// <param name="state">the parallel loop state</param>
/// <param name="catchException">whether calling Break will throw an InvalidOperationException</param>
private void BreakActionHelper(long i, ParallelLoopState state, bool catchException)
{
Debug.WriteLine("Calling BreakAction on index {0}, StartIndex: {1}, real index {2}", i, _startIndex, i - _startIndex);
// If we previously called Break() on the parallel state,
// we expect all iterations to have the same LowestBreakIteration value
if (_lowestBreakIter.HasValue)
{
Assert.False(state.LowestBreakIteration.Value != _lowestBreakIter.Value,
string.Format("Expecting LowestBreakIteration value to be {0} for iteration {1}, while getting {2}", _lowestBreakIter, i, state.LowestBreakIteration.Value));
// If we previously called Break() on the parallel state,
// we expect all higher iterations to see the state's ShouldExitCurrentIteration as true
Assert.False(i > _lowestBreakIter.Value && !state.ShouldExitCurrentIteration,
string.Format("Expecting ShouldExitCurrentIteration to be true for iteration {0}, LowestBreakIteration is {1}", i, _lowestBreakIter));
}
if (_lowestBreakIter.HasValue && i < _lowestBreakIter.Value && state.ShouldExitCurrentIteration)
{
long lbi = _lowestBreakIter.Value;
// If we previously called Break() on the parallel state,
// we expect all lower iterations to see the state's ShouldExitCurrentIteration as false.
// There is, however, a race condition during the check here: another Break could have happened
// between retrieving the LowestBreakIteration value and reading ShouldExitCurrentIteration,
// which changes the value of ShouldExitCurrentIteration.
// We compare against the sampled LowestBreakIteration value before failing the test
Assert.False(i < lbi, string.Format("Expecting ShouldExitCurrentIteration to be false for iteration {0}, LowestBreakIteration is {1}", i, lbi));
}
if (!_lowestBreakIter.HasValue || (_lowestBreakIter.HasValue && i < _lowestBreakIter.Value))
{
// If this is the first call to Break, or the current iteration is less than LowestBreakIteration,
// call Break() again and make sure the LowestBreakIteration value gets updated
try
{
state.Break();
_lowestBreakIter = state.LowestBreakIteration; // Save the lowest break iteration
// If the test is checking the scenario where break is called after stop then
// we expect an InvalidOperationException
Assert.False(catchException, "Not getting InvalidOperationException from Break() when expecting one");
}
// If the test is checking the scenario where break is called after stop then
// we expect an InvalidOperationException
catch (InvalidOperationException) when (catchException)
{
}
}
}
/// <summary>
/// This action tests multiple Break calls from different loop iterations
///
/// Helper function that calls Break for the first parameters.Count/2 iterations
///
/// If Break was already called then check that
/// 1) If this iteration is greater than the lowest break iteration, then shouldExitCurrentIteration should be true
/// 2) if this iteration is lower than the lowest break iteration then shouldExitCurrentIteration should be false
/// </summary>
/// <param name="i"></param>
/// <param name="state"></param>
/// <param name="catchException"></param>
private void MultipleBreakAction(long i, ParallelLoopState state)
{
if (Interlocked.Increment(ref _iterCount) < _parameters.Count / 2)
{
state.Break();
lock (_lock)
{
// Save the lowest break iteration
//m_lowestBreakIter = !m_lowestBreakIter.HasValue ? i : Math.Min(m_lowestBreakIter.Value, i);
if (!_lowestBreakIter.HasValue)
_lowestBreakIter = i;
if (_lowestBreakIter.Value > i)
_lowestBreakIter = i;
}
}
else
{
// If we previously called Break() on the parallel state,
// we expect all higher iterations to see the state's ShouldExitCurrentIteration as true
if (state.LowestBreakIteration.HasValue)
{
Assert.False(i > state.LowestBreakIteration.Value && !state.ShouldExitCurrentIteration,
string.Format("Expecting ShouldExitCurrentIteration to be true for iteration {0}, LowestBreakIteration is {1}", i, state.LowestBreakIteration.Value));
}
if (state.LowestBreakIteration.HasValue && i < state.LowestBreakIteration.Value && state.ShouldExitCurrentIteration)
{
long lbi = state.LowestBreakIteration.Value;
// If we previously called Break() on the parallel state,
// we expect all lower iterations to see the state's ShouldExitCurrentIteration as false.
// There is, however, a race condition during the check here: another Break could have happened
// between retrieving the LowestBreakIteration value and reading ShouldExitCurrentIteration,
// which changes the value of ShouldExitCurrentIteration.
// We compare against the sampled LowestBreakIteration value before failing the test
Assert.False(i < lbi, string.Format("Expecting ShouldExitCurrentIteration to be false for iteration {0}, LowestBreakIteration is {1}", i, lbi));
}
}
}
/// <summary>
/// Note!! This function is not thread safe and care must be taken so it is not called concurrently
///
/// This helper throws an exception from the current iteration if an exception is not already thrown
///
/// 1) If an exception was previously thrown (_isExceptional = true), then it checks if
/// ParallelLoopState-->IsExceptional is set
/// 2) If an exception was previously thrown then it checks if ParallelLoopState-->ShouldExitCurrentIteration is true
///
/// If an exception was not thrown before this, then throw an exception and set the test flag _isExceptional to true
/// </summary>
/// <param name="i"></param>
/// <param name="state"></param>
private void ExceptionalAction(long i, ParallelLoopState state)
{
Debug.WriteLine("Calling ExceptionalAction on index {0}, StartIndex: {1}, real index {2}", i, _startIndex, i - _startIndex);
Assert.False(_isExceptional != state.IsExceptional, string.Format("IsExceptional is expected to be {0} while getting {1}", _isExceptional, state.IsExceptional));
// A previous iteration threw an exception, so the Parallel loop should stop its work
Assert.False(_isExceptional && !state.ShouldExitCurrentIteration, string.Format("Expecting ShouldExitCurrentIteration to be true, since Exception was thrown on previous iterations"));
try
{
throw new InvalidOperationException("Throws test exception to verify it got handled properly");
}
finally
{
_isExceptional = true;
}
}
/// <summary>
/// This is the thread safe version of an action that throws exceptions and can be called concurrently
///
/// This action throws an exception for the first parameters.Count/2 iterations
///
/// For the rest of the actions, it performs the following checks
/// 1) If an exception was already thrown then check that ParallelLoopState->IsExceptional is true
/// 2) If an exception was already thrown then check that ParallelLoopState->ShouldExitCurrentIteration is true
/// </summary>
/// <param name="i"></param>
/// <param name="state"></param>
private void MultipleExceptionAction(long i, ParallelLoopState state)
{
Debug.WriteLine("Calling ExceptionalAction2 on index {0}, StartIndex: {1}, real index {2}", i, _startIndex, i - _startIndex);
if (Interlocked.Increment(ref _iterCount) < _parameters.Count / 2)
{
try
{
throw new System.InvalidOperationException("Throws test exception to verify it got handled properly");
}
finally
{
_isExceptional = true;
}
}
else
{
Assert.False(state.IsExceptional && !_isExceptional, string.Format("IsExceptional is expected to be {0} while getting {1}", _isExceptional, state.IsExceptional));
// A previous iteration threw an exception, so the Parallel loop should stop its work
Assert.False(state.IsExceptional && !state.ShouldExitCurrentIteration,
string.Format("Expecting ShouldExitCurrentIteration to be true, since Exception was thrown on previous iterations"));
}
}
#endregion
#region Helper Methods
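// ZetaSequence computes the partial sum of 1/(i*i), which converges to pi^2/6 (~1.645);
// Verify checks each stored result against a small band around that value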
public static double ZetaSequence(int n)
{
double result = 0;
for (int i = 1; i < n; i++)
{
result += 1.0 / ((double)i * (double)i);
}
return result;
}
/// <summary>
/// Called when a Thread is being used for the first time in the Parallel loop
/// Used by the 64 bit versions of Parallel.For
/// </summary>
/// <returns>a list where each loop body will store a unique result for verification</returns>
private List<long> ThreadLocalInit64()
{
List<long> local = new List<long>();
return local;
}
/// <summary>
/// Called when a Thread has completed execution in the Parallel loop
/// Used by the 64 bit versions of Parallel.For
/// </summary>
/// <remarks>Stores the ThreadLocal list of results to a global container for verification</remarks>
private void ThreadLocalFinally64(List<long> local)
{
//add this row to the global sequences
//sequences.Add(Environment.CurrentManagedThreadId, local);
long index = Interlocked.Increment(ref _threadCount) - 1;
_sequences64[index] = local;
}
/// <summary>
/// Called when a Thread is being used for the first time in the Parallel loop
/// </summary>
/// <returns>a list where each loop body will store a unique result for verification</returns>
private List<int> ThreadLocalInit()
{
List<int> local = new List<int>();
return local;
}
/// <summary>
/// Called when a Thread has completed execution in the Parallel loop
/// </summary>
/// <remarks>Stores the ThreadLocal list of results to a global container for verification</remarks>
private void ThreadLocalFinally(List<int> local)
{
//add this row to the global sequences
//sequences.Add(Environment.CurrentManagedThreadId, local);
long index = Interlocked.Increment(ref _threadCount) - 1;
_sequences[(int)index] = local;
}
/// <summary>
/// Checks that the result returned by the body of iteration i is correct
/// </summary>
/// <param name="i"></param>
/// <returns></returns>
private void Verify(int i)
{
// Floating point comparison can't be done by rounding off to the nearest decimal places since
// 1.64 could be represented as 1.63999999 or as 1.6499999999. To perform floating point comparisons,
// a range has to be defined and a check made to ensure that the result obtained is within the specified range
double minLimit = 1.63;
double maxLimit = 1.65;
if (_results[i] < minLimit || _results[i] > maxLimit)
{
Assert.False(double.MinValue == _results[i], string.Format("results[{0}] has been revisited", i));
if (_isStopped && 0 == _results[i])
Debug.WriteLine("Stopped calculation at index = {0}", i);
Assert.True(_isStopped && 0 == _results[i],
string.Format("Incorrect results[{0}]. Expected to lie between {1} and {2}, but got {3})", i, minLimit, maxLimit, _results[i]));
}
}
/// <summary>
/// Used to verify the result of a loop that was 'Stopped'
///
/// Expected:
/// 1) A ParallelLoopResult with IsCompleted = false and LowestBreakIteration = null
/// 2) For results that were processed, the body stored the correct value
/// </summary>
/// <param name="loopResult"></param>
/// <returns></returns>
private void StopVerification(ParallelLoopResult? loopResult)
{
Assert.False(loopResult == null, "No ParallelLoopResult returned");
Assert.False(loopResult.Value.IsCompleted == true || loopResult.Value.LowestBreakIteration != null,
string.Format("ParallelLoopResult invalid, expecting Completed=false,LowestBreakIteration=null, actual: {0}, {1}", loopResult.Value.IsCompleted, loopResult.Value.LowestBreakIteration));
for (int i = 0; i < _parameters.Count; i++)
Verify(i);
}
/// <summary>
/// This verification is used when we successfully called 'Break' on the loop
///
/// Expected:
/// 1) A valid ParallelLoopResult was returned with IsCompleted = false and LowestBreakIteration = the lowest iteration on which
/// the test called Break
/// 2) For results that were processed, the body stored the correct value
/// </summary>
/// <param name="loopResult"></param>
/// <returns></returns>
private void BreakVerification(ParallelLoopResult? loopResult)
{
Assert.False(loopResult == null, "No ParallelLoopResult returned");
Assert.False(loopResult.Value.IsCompleted == true || loopResult.Value.LowestBreakIteration == null || loopResult.Value.LowestBreakIteration != _lowestBreakIter,
string.Format("ParallelLoopResult invalid, expecting Completed=false,LowestBreakIteration={0}, actual: {1}, {2}", _lowestBreakIter, loopResult.Value.IsCompleted, loopResult.Value.LowestBreakIteration));
for (int i = 0; i < _lowestBreakIter.Value - _startIndex; i++)
Verify(i);
}
/// <summary>
/// This verification is called when we expect an exception from the test
///
/// Expected: ParallelLoopResult is returned as null
/// </summary>
/// <param name="loopResult"></param>
/// <returns></returns>
private void ExceptionalVerification(ParallelLoopResult? loopResult)
{
Assert.Null(loopResult);
}
#endregion
}
public enum API
{
For,
ForeachOnArray,
ForeachOnList,
}
public class TestParameters
{
public TestParameters()
{
Api = API.For;
}
public API Api; // the api to be tested
public int Count; // the count of loop range
public IEnumerable<string> Actions;
public IEnumerable<string> Verifications;
public bool ExpectingException; // Exception is expected
public bool WithLocalState;
public bool Is64;
}
public sealed class ParallelState
{
[Fact]
[OuterLoop]
public static void ParallelState0()
{
string[] actions = new string[] { "Break", };
string[] verifications = new string[] { "BreakVerification", };
TestParameters parameters = new TestParameters
{
Api = API.For,
Count = 10,
Actions = actions,
Verifications = verifications,
ExpectingException = false,
WithLocalState = false,
Is64 = false,
};
ParallelStateTest test = new ParallelStateTest(parameters);
test.RealRun();
}
[Fact]
[OuterLoop]
public static void ParallelState1()
{
string[] actions = new string[] { "Break", };
string[] verifications = new string[] { "BreakVerification", };
TestParameters parameters = new TestParameters
{
Api = API.For,
Count = 10,
Actions = actions,
Verifications = verifications,
ExpectingException = false,
WithLocalState = false,
Is64 = true,
};
ParallelStateTest test = new ParallelStateTest(parameters);
test.RealRun();
}
[Fact]
[OuterLoop]
public static void ParallelState2()
{
string[] actions = new string[] { "Break", };
string[] verifications = new string[] { "BreakVerification", };
TestParameters parameters = new TestParameters
{
Api = API.For,
Count = 10,
Actions = actions,
Verifications = verifications,
ExpectingException = false,
WithLocalState = true,
Is64 = true,
};
ParallelStateTest test = new ParallelStateTest(parameters);
test.RealRun();
}
[Fact]
[OuterLoop]
public static void ParallelState3()
{
string[] actions = new string[] { "Exceptional", };
string[] verifications = new string[] { "ExceptionalVerification", };
TestParameters parameters = new TestParameters
{
Api = API.For,
Count = 10,
Actions = actions,
Verifications = verifications,
ExpectingException = true,
WithLocalState = false,
Is64 = false,
};
ParallelStateTest test = new ParallelStateTest(parameters);
test.RealRun();
}
[Fact]
[OuterLoop]
public static void ParallelState4()
{
string[] actions = new string[] { "Exceptional", };
string[] verifications = new string[] { "ExceptionalVerification", };
TestParameters parameters = new TestParameters
{
Api = API.For,
Count = 10,
Actions = actions,
Verifications = verifications,
ExpectingException = true,
WithLocalState = false,
Is64 = true,
};
ParallelStateTest test = new ParallelStateTest(parameters);
test.RealRun();
}
[Fact]
[OuterLoop]
public static void ParallelState5()
{
string[] actions = new string[] { "Exceptional", };
string[] verifications = new string[] { "ExceptionalVerification", };
TestParameters parameters = new TestParameters
{
Api = API.For,
Count = 10,
Actions = actions,
Verifications = verifications,
ExpectingException = true,
WithLocalState = true,
Is64 = true,
};
ParallelStateTest test = new ParallelStateTest(parameters);
test.RealRun();
}
[Fact]
[OuterLoop]
public static void ParallelState6()
{
string[] actions = new string[] { "MultipleBreak", "MultipleBreak", "MultipleBreak", "MultipleBreak", "MultipleBreak", "MultipleBreak", "MultipleBreak", "MultipleBreak", "MultipleBreak", "MultipleBreak", };
string[] verifications = new string[] { "BreakVerification", };
TestParameters parameters = new TestParameters
{
Api = API.For,
Count = 10,
Actions = actions,
Verifications = verifications,
ExpectingException = false,
WithLocalState = false,
Is64 = true,
};
ParallelStateTest test = new ParallelStateTest(parameters);
test.RealRun();
}
[Fact]
[OuterLoop]
public static void ParallelState7()
{
string[] actions = new string[] { "MultipleBreak", "MultipleBreak", "MultipleBreak", "MultipleBreak", "MultipleBreak", "MultipleBreak", "MultipleBreak", "MultipleBreak", "MultipleBreak", "MultipleBreak", };
string[] verifications = new string[] { "BreakVerification", };
TestParameters parameters = new TestParameters
{
Api = API.For,
Count = 10,
Actions = actions,
Verifications = verifications,
ExpectingException = false,
WithLocalState = true,
Is64 = false,
};
ParallelStateTest test = new ParallelStateTest(parameters);
test.RealRun();
}
[Fact]
[OuterLoop]
public static void ParallelState8()
{
string[] actions = new string[] { "MultipleBreak", "MultipleBreak", "MultipleBreak", "MultipleBreak", "MultipleBreak", "MultipleBreak", "MultipleBreak", "MultipleBreak", "MultipleBreak", "MultipleBreak", };
string[] verifications = new string[] { "BreakVerification", };
TestParameters parameters = new TestParameters
{
Api = API.For,
Count = 10,
Actions = actions,
Verifications = verifications,
ExpectingException = false,
WithLocalState = true,
Is64 = true,
};
ParallelStateTest test = new ParallelStateTest(parameters);
test.RealRun();
}
[Fact]
[OuterLoop]
public static void ParallelState9()
{
string[] actions = new string[] { "MultipleException", "MultipleException", "MultipleException", "MultipleException", "MultipleException", "MultipleException", "MultipleException", "MultipleException", "MultipleException", "MultipleException", };
string[] verifications = new string[] { "ExceptionalVerification", };
TestParameters parameters = new TestParameters
{
Api = API.For,
Count = 10,
Actions = actions,
Verifications = verifications,
ExpectingException = true,
WithLocalState = false,
Is64 = false,
};
ParallelStateTest test = new ParallelStateTest(parameters);
test.RealRun();
}
[Fact]
[OuterLoop]
public static void ParallelState10()
{
string[] actions = new string[] { "MultipleException", "MultipleException", "MultipleException", "MultipleException", "MultipleException", "MultipleException", "MultipleException", "MultipleException", "MultipleException", "MultipleException", };
string[] verifications = new string[] { "ExceptionalVerification", };
TestParameters parameters = new TestParameters
{
Api = API.For,
Count = 10,
Actions = actions,
Verifications = verifications,
ExpectingException = true,
WithLocalState = false,
Is64 = true,
};
ParallelStateTest test = new ParallelStateTest(parameters);
test.RealRun();
}
[Fact]
[OuterLoop]
public static void ParallelState11()
{
string[] actions = new string[] { "MultipleException", "MultipleException", "MultipleException", "MultipleException", "MultipleException", "MultipleException", "MultipleException", "MultipleException", "MultipleException", "MultipleException", };
string[] verifications = new string[] { "ExceptionalVerification", };
TestParameters parameters = new TestParameters
{
Api = API.For,
Count = 10,
Actions = actions,
Verifications = verifications,
ExpectingException = true,
WithLocalState = true,
Is64 = true,
};
ParallelStateTest test = new ParallelStateTest(parameters);
test.RealRun();
}
[Fact]
[OuterLoop]
public static void ParallelState12()
{
string[] actions = new string[] { "MultipleStop", "MultipleStop", "MultipleStop", "MultipleStop", "MultipleStop", "MultipleStop", "MultipleStop", "MultipleStop", "MultipleStop", "MultipleStop", };
string[] verifications = new string[] { "StopVerification", };
TestParameters parameters = new TestParameters
{
Api = API.For,
Count = 10,
Actions = actions,
Verifications = verifications,
ExpectingException = false,
WithLocalState = false,
Is64 = true,
};
ParallelStateTest test = new ParallelStateTest(parameters);
test.RealRun();
}
[Fact]
[OuterLoop]
public static void ParallelState13()
{
string[] actions = new string[] { "MultipleStop", "MultipleStop", "MultipleStop", "MultipleStop", "MultipleStop", "MultipleStop", "MultipleStop", "MultipleStop", "MultipleStop", "MultipleStop", };
string[] verifications = new string[] { "StopVerification", };
TestParameters parameters = new TestParameters
{
Api = API.For,
Count = 10,
Actions = actions,
Verifications = verifications,
ExpectingException = false,
WithLocalState = true,
Is64 = false,
};
ParallelStateTest test = new ParallelStateTest(parameters);
test.RealRun();
}
[Fact]
[OuterLoop]
public static void ParallelState14()
{
string[] actions = new string[] { "MultipleStop", "MultipleStop", "MultipleStop", "MultipleStop", "MultipleStop", "MultipleStop", "MultipleStop", "MultipleStop", "MultipleStop", "MultipleStop", };
string[] verifications = new string[] { "StopVerification", };
TestParameters parameters = new TestParameters
{
Api = API.For,
Count = 10,
Actions = actions,
Verifications = verifications,
ExpectingException = false,
WithLocalState = true,
Is64 = true,
};
ParallelStateTest test = new ParallelStateTest(parameters);
test.RealRun();
}
[Fact]
[OuterLoop]
public static void ParallelState15()
{
string[] actions = new string[] { "Stop", };
string[] verifications = new string[] { "StopVerification", };
TestParameters parameters = new TestParameters
{
Api = API.For,
Count = 10,
Actions = actions,
Verifications = verifications,
ExpectingException = false,
WithLocalState = false,
Is64 = false,
};
ParallelStateTest test = new ParallelStateTest(parameters);
test.RealRun();
}
[Fact]
[OuterLoop]
public static void ParallelState16()
{
string[] actions = new string[] { "Stop", };
string[] verifications = new string[] { "StopVerification", };
TestParameters parameters = new TestParameters
{
Api = API.For,
Count = 10,
Actions = actions,
Verifications = verifications,
ExpectingException = false,
WithLocalState = false,
Is64 = true,
};
ParallelStateTest test = new ParallelStateTest(parameters);
test.RealRun();
}
[Fact]
[OuterLoop]
public static void ParallelState17()
{
string[] actions = new string[] { "Stop", };
string[] verifications = new string[] { "StopVerification", };
TestParameters parameters = new TestParameters
{
Api = API.For,
Count = 10,
Actions = actions,
Verifications = verifications,
ExpectingException = false,
WithLocalState = true,
Is64 = true,
};
ParallelStateTest test = new ParallelStateTest(parameters);
test.RealRun();
}
[Fact]
[OuterLoop]
public static void ParallelState18()
{
string[] actions = new string[] { "SyncSetBreak_0", "SyncWaitStopCatchExp_1", };
string[] verifications = new string[] { "BreakVerification", };
TestParameters parameters = new TestParameters
{
Api = API.For,
Count = 10,
Actions = actions,
Verifications = verifications,
ExpectingException = false,
WithLocalState = false,
Is64 = false,
};
ParallelStateTest test = new ParallelStateTest(parameters);
test.RealRun();
}
[Fact]
[OuterLoop]
public static void ParallelState19()
{
string[] actions = new string[] { "SyncSetBreak_0", "SyncWaitStopCatchExp_1", };
string[] verifications = new string[] { "BreakVerification", };
TestParameters parameters = new TestParameters
{
Api = API.For,
Count = 10,
Actions = actions,
Verifications = verifications,
ExpectingException = false,
WithLocalState = false,
Is64 = true,
};
ParallelStateTest test = new ParallelStateTest(parameters);
test.RealRun();
}
[Fact]
[OuterLoop]
public static void ParallelState20()
{
string[] actions = new string[] { "SyncSetBreak_0", "SyncWaitStopCatchExp_1", };
string[] verifications = new string[] { "BreakVerification", };
TestParameters parameters = new TestParameters
{
Api = API.For,
Count = 10,
Actions = actions,
Verifications = verifications,
ExpectingException = false,
WithLocalState = true,
Is64 = true,
};
ParallelStateTest test = new ParallelStateTest(parameters);
test.RealRun();
}
[Fact]
[OuterLoop]
public static void ParallelState21()
{
string[] actions = new string[] { "SyncSetBreak_0", "SyncWaitStop_1", };
string[] verifications = new string[] { "ExceptionalVerification", };
TestParameters parameters = new TestParameters
{
Api = API.For,
Count = 10,
Actions = actions,
Verifications = verifications,
ExpectingException = true,
WithLocalState = false,
Is64 = true,
};
ParallelStateTest test = new ParallelStateTest(parameters);
test.RealRun();
}
[Fact]
[OuterLoop]
public static void ParallelState22()
{
string[] actions = new string[] { "SyncSetBreak_0", "SyncWaitStop_1", };
string[] verifications = new string[] { "ExceptionalVerification", };
TestParameters parameters = new TestParameters
{
Api = API.For,
Count = 10,
Actions = actions,
Verifications = verifications,
ExpectingException = true,
WithLocalState = true,
Is64 = false,
};
ParallelStateTest test = new ParallelStateTest(parameters);
test.RealRun();
}
[Fact]
[OuterLoop]
public static void ParallelState23()
{
string[] actions = new string[] { "SyncSetBreak_0", "SyncWaitStop_1", };
string[] verifications = new string[] { "ExceptionalVerification", };
TestParameters parameters = new TestParameters
{
Api = API.For,
Count = 10,
Actions = actions,
Verifications = verifications,
ExpectingException = true,
WithLocalState = true,
Is64 = true,
};
ParallelStateTest test = new ParallelStateTest(parameters);
test.RealRun();
}
[Fact]
[OuterLoop]
public static void ParallelState24()
{
string[] actions = new string[] { "SyncSetStop_0", "SyncWaitBreakCatchExp_1", };
string[] verifications = new string[] { "StopVerification", };
TestParameters parameters = new TestParameters
{
Api = API.For,
Count = 10,
Actions = actions,
Verifications = verifications,
ExpectingException = false,
WithLocalState = false,
Is64 = true,
};
ParallelStateTest test = new ParallelStateTest(parameters);
test.RealRun();
}
[Fact]
[OuterLoop]
public static void ParallelState25()
{
string[] actions = new string[] { "SyncSetStop_0", "SyncWaitBreakCatchExp_1", };
string[] verifications = new string[] { "StopVerification", };
TestParameters parameters = new TestParameters
{
Api = API.For,
Count = 10,
Actions = actions,
Verifications = verifications,
ExpectingException = false,
WithLocalState = true,
Is64 = false,
};
ParallelStateTest test = new ParallelStateTest(parameters);
test.RealRun();
}
[Fact]
[OuterLoop]
public static void ParallelState26()
{
string[] actions = new string[] { "SyncSetStop_0", "SyncWaitBreakCatchExp_1", };
string[] verifications = new string[] { "StopVerification", };
TestParameters parameters = new TestParameters
{
Api = API.For,
Count = 10,
Actions = actions,
Verifications = verifications,
ExpectingException = false,
WithLocalState = true,
Is64 = true,
};
ParallelStateTest test = new ParallelStateTest(parameters);
test.RealRun();
}
[Fact]
[OuterLoop]
public static void ParallelState27()
{
string[] actions = new string[] { "SyncSetStop_0", "SyncWaitBreak_1", };
string[] verifications = new string[] { "ExceptionalVerification", };
TestParameters parameters = new TestParameters
{
Api = API.For,
Count = 10,
Actions = actions,
Verifications = verifications,
ExpectingException = true,
WithLocalState = false,
Is64 = true,
};
ParallelStateTest test = new ParallelStateTest(parameters);
test.RealRun();
}
[Fact]
[OuterLoop]
public static void ParallelState28()
{
string[] actions = new string[] { "SyncSetStop_0", "SyncWaitBreak_1", };
string[] verifications = new string[] { "ExceptionalVerification", };
TestParameters parameters = new TestParameters
{
Api = API.For,
Count = 10,
Actions = actions,
Verifications = verifications,
ExpectingException = true,
WithLocalState = true,
Is64 = false,
};
ParallelStateTest test = new ParallelStateTest(parameters);
test.RealRun();
}
[Fact]
[OuterLoop]
public static void ParallelState29()
{
string[] actions = new string[] { "SyncSetStop_0", "SyncWaitBreak_1", };
string[] verifications = new string[] { "ExceptionalVerification", };
TestParameters parameters = new TestParameters
{
Api = API.For,
Count = 10,
Actions = actions,
Verifications = verifications,
ExpectingException = true,
WithLocalState = true,
Is64 = true,
};
ParallelStateTest test = new ParallelStateTest(parameters);
test.RealRun();
}
[Fact]
[OuterLoop]
public static void ParallelState30()
{
string[] actions = new string[] { "SyncWaitBreak_0", "SyncSetBreak_1", };
string[] verifications = new string[] { "BreakVerification", };
TestParameters parameters = new TestParameters
{
Api = API.For,
Count = 10,
Actions = actions,
Verifications = verifications,
ExpectingException = false,
WithLocalState = false,
Is64 = true,
};
ParallelStateTest test = new ParallelStateTest(parameters);
test.RealRun();
}
[Fact]
[OuterLoop]
public static void ParallelState31()
{
string[] actions = new string[] { "SyncWaitBreak_0", "SyncSetBreak_1", };
string[] verifications = new string[] { "BreakVerification", };
TestParameters parameters = new TestParameters
{
Api = API.For,
Count = 10,
Actions = actions,
Verifications = verifications,
ExpectingException = false,
WithLocalState = true,
Is64 = false,
};
ParallelStateTest test = new ParallelStateTest(parameters);
test.RealRun();
}
[Fact]
[OuterLoop]
public static void ParallelState32()
{
string[] actions = new string[] { "SyncWaitBreak_0", "SyncSetBreak_1", };
string[] verifications = new string[] { "BreakVerification", };
TestParameters parameters = new TestParameters
{
Api = API.For,
Count = 10,
Actions = actions,
Verifications = verifications,
ExpectingException = false,
WithLocalState = true,
Is64 = true,
};
ParallelStateTest test = new ParallelStateTest(parameters);
test.RealRun();
}
[Fact]
[OuterLoop]
public static void ParallelState33()
{
string[] actions = new string[] { "Break", };
string[] verifications = new string[] { "BreakVerification", };
TestParameters parameters = new TestParameters
{
Api = API.ForeachOnArray,
Count = 10,
Actions = actions,
Verifications = verifications,
ExpectingException = false,
WithLocalState = false,
Is64 = false,
};
ParallelStateTest test = new ParallelStateTest(parameters);
test.RealRun();
}
[Fact]
[OuterLoop]
public static void ParallelState34()
{
string[] actions = new string[] { "Break", };
string[] verifications = new string[] { "BreakVerification", };
TestParameters parameters = new TestParameters
{
Api = API.ForeachOnArray,
Count = 10,
Actions = actions,
Verifications = verifications,
ExpectingException = false,
WithLocalState = true,
Is64 = false,
};
ParallelStateTest test = new ParallelStateTest(parameters);
test.RealRun();
}
[Fact]
[OuterLoop]
public static void ParallelState35()
{
string[] actions = new string[] { "Exceptional", };
string[] verifications = new string[] { "ExceptionalVerification", };
TestParameters parameters = new TestParameters
{
Api = API.ForeachOnArray,
Count = 10,
Actions = actions,
Verifications = verifications,
ExpectingException = true,
WithLocalState = false,
Is64 = false,
};
ParallelStateTest test = new ParallelStateTest(parameters);
test.RealRun();
}
[Fact]
[OuterLoop]
public static void ParallelState36()
{
string[] actions = new string[] { "Exceptional", };
string[] verifications = new string[] { "ExceptionalVerification", };
TestParameters parameters = new TestParameters
{
Api = API.ForeachOnArray,
Count = 10,
Actions = actions,
Verifications = verifications,
ExpectingException = true,
WithLocalState = true,
Is64 = false,
};
ParallelStateTest test = new ParallelStateTest(parameters);
test.RealRun();
}
[Fact]
[OuterLoop]
public static void ParallelState37()
{
string[] actions = new string[] { "MultipleBreak", "MultipleBreak", "MultipleBreak", "MultipleBreak", "MultipleBreak", "MultipleBreak", "MultipleBreak", "MultipleBreak", "MultipleBreak", "MultipleBreak", };
string[] verifications = new string[] { "BreakVerification", };
TestParameters parameters = new TestParameters
{
Api = API.ForeachOnArray,
Count = 10,
Actions = actions,
Verifications = verifications,
ExpectingException = false,
WithLocalState = false,
Is64 = false,
};
ParallelStateTest test = new ParallelStateTest(parameters);
test.RealRun();
}
[Fact]
[OuterLoop]
public static void ParallelState38()
{
string[] actions = new string[] { "MultipleBreak", "MultipleBreak", "MultipleBreak", "MultipleBreak", "MultipleBreak", "MultipleBreak", "MultipleBreak", "MultipleBreak", "MultipleBreak", "MultipleBreak", };
string[] verifications = new string[] { "BreakVerification", };
TestParameters parameters = new TestParameters
{
Api = API.ForeachOnArray,
Count = 10,
Actions = actions,
Verifications = verifications,
ExpectingException = false,
WithLocalState = true,
Is64 = false,
};
ParallelStateTest test = new ParallelStateTest(parameters);
test.RealRun();
}
[Fact]
[OuterLoop]
public static void ParallelState39()
{
string[] actions = new string[] { "MultipleException", "MultipleException", "MultipleException", "MultipleException", "MultipleException", "MultipleException", "MultipleException", "MultipleException", "MultipleException", "MultipleException", };
string[] verifications = new string[] { "ExceptionalVerification", };
TestParameters parameters = new TestParameters
{
Api = API.ForeachOnArray,
Count = 10,
Actions = actions,
Verifications = verifications,
ExpectingException = true,
WithLocalState = false,
Is64 = false,
};
ParallelStateTest test = new ParallelStateTest(parameters);
test.RealRun();
}
[Fact]
[OuterLoop]
public static void ParallelState40()
{
string[] actions = new string[] { "MultipleException", "MultipleException", "MultipleException", "MultipleException", "MultipleException", "MultipleException", "MultipleException", "MultipleException", "MultipleException", "MultipleException", };
string[] verifications = new string[] { "ExceptionalVerification", };
TestParameters parameters = new TestParameters
{
Api = API.ForeachOnArray,
Count = 10,
Actions = actions,
Verifications = verifications,
ExpectingException = true,
WithLocalState = true,
Is64 = false,
};
ParallelStateTest test = new ParallelStateTest(parameters);
test.RealRun();
}
[Fact]
[OuterLoop]
public static void ParallelState41()
{
string[] actions = new string[] { "MultipleStop", "MultipleStop", "MultipleStop", "MultipleStop", "MultipleStop", "MultipleStop", "MultipleStop", "MultipleStop", "MultipleStop", "MultipleStop", };
string[] verifications = new string[] { "StopVerification", };
TestParameters parameters = new TestParameters
{
Api = API.ForeachOnArray,
Count = 10,
Actions = actions,
Verifications = verifications,
ExpectingException = false,
WithLocalState = false,
Is64 = false,
};
ParallelStateTest test = new ParallelStateTest(parameters);
test.RealRun();
}
[Fact]
[OuterLoop]
public static void ParallelState42()
{
string[] actions = new string[] { "MultipleStop", "MultipleStop", "MultipleStop", "MultipleStop", "MultipleStop", "MultipleStop", "MultipleStop", "MultipleStop", "MultipleStop", "MultipleStop", };
string[] verifications = new string[] { "StopVerification", };
TestParameters parameters = new TestParameters
{
Api = API.ForeachOnArray,
Count = 10,
Actions = actions,
Verifications = verifications,
ExpectingException = false,
WithLocalState = true,
Is64 = false,
};
ParallelStateTest test = new ParallelStateTest(parameters);
test.RealRun();
}
[Fact]
[OuterLoop]
public static void ParallelState43()
{
string[] actions = new string[] { "Stop", };
string[] verifications = new string[] { "StopVerification", };
TestParameters parameters = new TestParameters
{
Api = API.ForeachOnArray,
Count = 10,
Actions = actions,
Verifications = verifications,
ExpectingException = false,
WithLocalState = false,
Is64 = false,
};
ParallelStateTest test = new ParallelStateTest(parameters);
test.RealRun();
}
[Fact]
[OuterLoop]
public static void ParallelState44()
{
string[] actions = new string[] { "Stop", };
string[] verifications = new string[] { "StopVerification", };
TestParameters parameters = new TestParameters
{
Api = API.ForeachOnArray,
Count = 10,
Actions = actions,
Verifications = verifications,
ExpectingException = false,
WithLocalState = true,
Is64 = false,
};
ParallelStateTest test = new ParallelStateTest(parameters);
test.RealRun();
}
[Fact]
[OuterLoop]
public static void ParallelState45()
{
string[] actions = new string[] { "SyncSetBreak_0", "SyncWaitStopCatchExp_1", };
string[] verifications = new string[] { "BreakVerification", };
TestParameters parameters = new TestParameters
{
Api = API.ForeachOnArray,
Count = 10,
Actions = actions,
Verifications = verifications,
ExpectingException = false,
WithLocalState = false,
Is64 = false,
};
ParallelStateTest test = new ParallelStateTest(parameters);
test.RealRun();
}
[Fact]
[OuterLoop]
public static void ParallelState46()
{
string[] actions = new string[] { "SyncSetBreak_0", "SyncWaitStopCatchExp_1", };
string[] verifications = new string[] { "BreakVerification", };
TestParameters parameters = new TestParameters
{
Api = API.ForeachOnArray,
Count = 10,
Actions = actions,
Verifications = verifications,
ExpectingException = false,
WithLocalState = true,
Is64 = false,
};
ParallelStateTest test = new ParallelStateTest(parameters);
test.RealRun();
}
[Fact]
[OuterLoop]
public static void ParallelState47()
{
string[] actions = new string[] { "SyncSetBreak_0", "SyncWaitStop_1", };
string[] verifications = new string[] { "ExceptionalVerification", };
TestParameters parameters = new TestParameters
{
Api = API.ForeachOnArray,
Count = 10,
Actions = actions,
Verifications = verifications,
ExpectingException = true,
WithLocalState = false,
Is64 = false,
};
ParallelStateTest test = new ParallelStateTest(parameters);
test.RealRun();
}
[Fact]
[OuterLoop]
public static void ParallelState48()
{
string[] actions = new string[] { "SyncSetBreak_0", "SyncWaitStop_1", };
string[] verifications = new string[] { "ExceptionalVerification", };
TestParameters parameters = new TestParameters
{
Api = API.ForeachOnArray,
Count = 10,
Actions = actions,
Verifications = verifications,
ExpectingException = true,
WithLocalState = true,
Is64 = false,
};
ParallelStateTest test = new ParallelStateTest(parameters);
test.RealRun();
}
[Fact]
[OuterLoop]
public static void ParallelState49()
{
string[] actions = new string[] { "SyncSetStop_0", "SyncWaitBreakCatchExp_1", };
string[] verifications = new string[] { "StopVerification", };
TestParameters parameters = new TestParameters
{
Api = API.ForeachOnArray,
Count = 10,
Actions = actions,
Verifications = verifications,
ExpectingException = false,
WithLocalState = false,
Is64 = false,
};
ParallelStateTest test = new ParallelStateTest(parameters);
test.RealRun();
}
[Fact]
[OuterLoop]
public static void ParallelState50()
{
string[] actions = new string[] { "SyncSetStop_0", "SyncWaitBreakCatchExp_1", };
string[] verifications = new string[] { "StopVerification", };
TestParameters parameters = new TestParameters
{
Api = API.ForeachOnArray,
Count = 10,
Actions = actions,
Verifications = verifications,
ExpectingException = false,
WithLocalState = true,
Is64 = false,
};
ParallelStateTest test = new ParallelStateTest(parameters);
test.RealRun();
}
[Fact]
[OuterLoop]
public static void ParallelState51()
{
string[] actions = new string[] { "SyncSetStop_0", "SyncWaitBreak_1", };
string[] verifications = new string[] { "ExceptionalVerification", };
TestParameters parameters = new TestParameters
{
Api = API.ForeachOnArray,
Count = 10,
Actions = actions,
Verifications = verifications,
ExpectingException = true,
WithLocalState = false,
Is64 = false,
};
ParallelStateTest test = new ParallelStateTest(parameters);
test.RealRun();
}
[Fact]
[OuterLoop]
public static void ParallelState52()
{
string[] actions = new string[] { "SyncSetStop_0", "SyncWaitBreak_1", };
string[] verifications = new string[] { "ExceptionalVerification", };
TestParameters parameters = new TestParameters
{
Api = API.ForeachOnArray,
Count = 10,
Actions = actions,
Verifications = verifications,
ExpectingException = true,
WithLocalState = true,
Is64 = false,
};
ParallelStateTest test = new ParallelStateTest(parameters);
test.RealRun();
}
[Fact]
[OuterLoop]
public static void ParallelState53()
{
string[] actions = new string[] { "SyncWaitBreak_0", "SyncSetBreak_1", };
string[] verifications = new string[] { "BreakVerification", };
TestParameters parameters = new TestParameters
{
Api = API.ForeachOnArray,
Count = 10,
Actions = actions,
Verifications = verifications,
ExpectingException = false,
WithLocalState = false,
Is64 = false,
};
ParallelStateTest test = new ParallelStateTest(parameters);
test.RealRun();
}
[Fact]
[OuterLoop]
public static void ParallelState54()
{
string[] actions = new string[] { "SyncWaitBreak_0", "SyncSetBreak_1", };
string[] verifications = new string[] { "BreakVerification", };
TestParameters parameters = new TestParameters
{
Api = API.ForeachOnArray,
Count = 10,
Actions = actions,
Verifications = verifications,
ExpectingException = false,
WithLocalState = true,
Is64 = false,
};
ParallelStateTest test = new ParallelStateTest(parameters);
test.RealRun();
}
[Fact]
[OuterLoop]
public static void ParallelState55()
{
string[] actions = new string[] { "Break", };
string[] verifications = new string[] { "BreakVerification", };
TestParameters parameters = new TestParameters
{
Api = API.ForeachOnList,
Count = 10,
Actions = actions,
Verifications = verifications,
ExpectingException = false,
WithLocalState = false,
Is64 = false,
};
ParallelStateTest test = new ParallelStateTest(parameters);
test.RealRun();
}
[Fact]
[OuterLoop]
public static void ParallelState56()
{
string[] actions = new string[] { "Break", };
string[] verifications = new string[] { "BreakVerification", };
TestParameters parameters = new TestParameters
{
Api = API.ForeachOnList,
Count = 10,
Actions = actions,
Verifications = verifications,
ExpectingException = false,
WithLocalState = true,
Is64 = false,
};
ParallelStateTest test = new ParallelStateTest(parameters);
test.RealRun();
}
[Fact]
[OuterLoop]
public static void ParallelState57()
{
string[] actions = new string[] { "Exceptional", };
string[] verifications = new string[] { "ExceptionalVerification", };
TestParameters parameters = new TestParameters
{
Api = API.ForeachOnList,
Count = 10,
Actions = actions,
Verifications = verifications,
ExpectingException = true,
WithLocalState = false,
Is64 = false,
};
ParallelStateTest test = new ParallelStateTest(parameters);
test.RealRun();
}
[Fact]
[OuterLoop]
public static void ParallelState58()
{
string[] actions = new string[] { "Exceptional", };
string[] verifications = new string[] { "ExceptionalVerification", };
TestParameters parameters = new TestParameters
{
Api = API.ForeachOnList,
Count = 10,
Actions = actions,
Verifications = verifications,
ExpectingException = true,
WithLocalState = true,
Is64 = false,
};
ParallelStateTest test = new ParallelStateTest(parameters);
test.RealRun();
}
[Fact]
[OuterLoop]
public static void ParallelState59()
{
string[] actions = new string[] { "MultipleBreak", "MultipleBreak", "MultipleBreak", "MultipleBreak", "MultipleBreak", "MultipleBreak", "MultipleBreak", "MultipleBreak", "MultipleBreak", "MultipleBreak", };
string[] verifications = new string[] { "BreakVerification", };
TestParameters parameters = new TestParameters
{
Api = API.ForeachOnList,
Count = 10,
Actions = actions,
Verifications = verifications,
ExpectingException = false,
WithLocalState = false,
Is64 = false,
};
ParallelStateTest test = new ParallelStateTest(parameters);
test.RealRun();
}
[Fact]
[OuterLoop]
public static void ParallelState60()
{
string[] actions = new string[] { "MultipleBreak", "MultipleBreak", "MultipleBreak", "MultipleBreak", "MultipleBreak", "MultipleBreak", "MultipleBreak", "MultipleBreak", "MultipleBreak", "MultipleBreak", };
string[] verifications = new string[] { "BreakVerification", };
TestParameters parameters = new TestParameters
{
Api = API.ForeachOnList,
Count = 10,
Actions = actions,
Verifications = verifications,
ExpectingException = false,
WithLocalState = true,
Is64 = false,
};
ParallelStateTest test = new ParallelStateTest(parameters);
test.RealRun();
}
[Fact]
[OuterLoop]
public static void ParallelState61()
{
string[] actions = new string[] { "MultipleException", "MultipleException", "MultipleException", "MultipleException", "MultipleException", "MultipleException", "MultipleException", "MultipleException", "MultipleException", "MultipleException", };
string[] verifications = new string[] { "ExceptionalVerification", };
TestParameters parameters = new TestParameters
{
Api = API.ForeachOnList,
Count = 10,
Actions = actions,
Verifications = verifications,
ExpectingException = true,
WithLocalState = false,
Is64 = false,
};
ParallelStateTest test = new ParallelStateTest(parameters);
test.RealRun();
}
[Fact]
[OuterLoop]
public static void ParallelState62()
{
string[] actions = new string[] { "MultipleException", "MultipleException", "MultipleException", "MultipleException", "MultipleException", "MultipleException", "MultipleException", "MultipleException", "MultipleException", "MultipleException", };
string[] verifications = new string[] { "ExceptionalVerification", };
TestParameters parameters = new TestParameters
{
Api = API.ForeachOnList,
Count = 10,
Actions = actions,
Verifications = verifications,
ExpectingException = true,
WithLocalState = true,
Is64 = false,
};
ParallelStateTest test = new ParallelStateTest(parameters);
test.RealRun();
}
[Fact]
[OuterLoop]
public static void ParallelState63()
{
string[] actions = new string[] { "MultipleStop", "MultipleStop", "MultipleStop", "MultipleStop", "MultipleStop", "MultipleStop", "MultipleStop", "MultipleStop", "MultipleStop", "MultipleStop", };
string[] verifications = new string[] { "StopVerification", };
TestParameters parameters = new TestParameters
{
Api = API.ForeachOnList,
Count = 10,
Actions = actions,
Verifications = verifications,
ExpectingException = false,
WithLocalState = false,
Is64 = false,
};
ParallelStateTest test = new ParallelStateTest(parameters);
test.RealRun();
}
[Fact]
[OuterLoop]
public static void ParallelState64()
{
string[] actions = new string[] { "MultipleStop", "MultipleStop", "MultipleStop", "MultipleStop", "MultipleStop", "MultipleStop", "MultipleStop", "MultipleStop", "MultipleStop", "MultipleStop", };
string[] verifications = new string[] { "StopVerification", };
TestParameters parameters = new TestParameters
{
Api = API.ForeachOnList,
Count = 10,
Actions = actions,
Verifications = verifications,
ExpectingException = false,
WithLocalState = true,
Is64 = false,
};
ParallelStateTest test = new ParallelStateTest(parameters);
test.RealRun();
}
[Fact]
[OuterLoop]
public static void ParallelState65()
{
string[] actions = new string[] { "Stop", };
string[] verifications = new string[] { "StopVerification", };
TestParameters parameters = new TestParameters
{
Api = API.ForeachOnList,
Count = 10,
Actions = actions,
Verifications = verifications,
ExpectingException = false,
WithLocalState = false,
Is64 = false,
};
ParallelStateTest test = new ParallelStateTest(parameters);
test.RealRun();
}
[Fact]
[OuterLoop]
public static void ParallelState66()
{
string[] actions = new string[] { "Stop", };
string[] verifications = new string[] { "StopVerification", };
TestParameters parameters = new TestParameters
{
Api = API.ForeachOnList,
Count = 10,
Actions = actions,
Verifications = verifications,
ExpectingException = false,
WithLocalState = true,
Is64 = false,
};
ParallelStateTest test = new ParallelStateTest(parameters);
test.RealRun();
}
[Fact]
[OuterLoop]
public static void ParallelState67()
{
string[] actions = new string[] { "SyncSetBreak_0", "SyncWaitStopCatchExp_1", };
string[] verifications = new string[] { "BreakVerification", };
TestParameters parameters = new TestParameters
{
Api = API.ForeachOnList,
Count = 10,
Actions = actions,
Verifications = verifications,
ExpectingException = false,
WithLocalState = false,
Is64 = false,
};
ParallelStateTest test = new ParallelStateTest(parameters);
test.RealRun();
}
[Fact]
[OuterLoop]
public static void ParallelState68()
{
string[] actions = new string[] { "SyncSetBreak_0", "SyncWaitStopCatchExp_1", };
string[] verifications = new string[] { "BreakVerification", };
TestParameters parameters = new TestParameters
{
Api = API.ForeachOnList,
Count = 10,
Actions = actions,
Verifications = verifications,
ExpectingException = false,
WithLocalState = true,
Is64 = false,
};
ParallelStateTest test = new ParallelStateTest(parameters);
test.RealRun();
}
[Fact]
[OuterLoop]
public static void ParallelState69()
{
string[] actions = new string[] { "SyncSetBreak_0", "SyncWaitStop_1", };
string[] verifications = new string[] { "ExceptionalVerification", };
TestParameters parameters = new TestParameters
{
Api = API.ForeachOnList,
Count = 10,
Actions = actions,
Verifications = verifications,
ExpectingException = true,
WithLocalState = false,
Is64 = false,
};
ParallelStateTest test = new ParallelStateTest(parameters);
test.RealRun();
}
[Fact]
[OuterLoop]
public static void ParallelState70()
{
string[] actions = new string[] { "SyncSetBreak_0", "SyncWaitStop_1", };
string[] verifications = new string[] { "ExceptionalVerification", };
TestParameters parameters = new TestParameters
{
Api = API.ForeachOnList,
Count = 10,
Actions = actions,
Verifications = verifications,
ExpectingException = true,
WithLocalState = true,
Is64 = false,
};
ParallelStateTest test = new ParallelStateTest(parameters);
test.RealRun();
}
[Fact]
[OuterLoop]
public static void ParallelState71()
{
string[] actions = new string[] { "SyncSetStop_0", "SyncWaitBreakCatchExp_1", };
string[] verifications = new string[] { "StopVerification", };
TestParameters parameters = new TestParameters
{
Api = API.ForeachOnList,
Count = 10,
Actions = actions,
Verifications = verifications,
ExpectingException = false,
WithLocalState = false,
Is64 = false,
};
ParallelStateTest test = new ParallelStateTest(parameters);
test.RealRun();
}
[Fact]
[OuterLoop]
public static void ParallelState72()
{
string[] actions = new string[] { "SyncSetStop_0", "SyncWaitBreakCatchExp_1", };
string[] verifications = new string[] { "StopVerification", };
TestParameters parameters = new TestParameters
{
Api = API.ForeachOnList,
Count = 10,
Actions = actions,
Verifications = verifications,
ExpectingException = false,
WithLocalState = true,
Is64 = false,
};
ParallelStateTest test = new ParallelStateTest(parameters);
test.RealRun();
}
[Fact]
[OuterLoop]
public static void ParallelState73()
{
string[] actions = new string[] { "SyncSetStop_0", "SyncWaitBreak_1", };
string[] verifications = new string[] { "ExceptionalVerification", };
TestParameters parameters = new TestParameters
{
Api = API.ForeachOnList,
Count = 10,
Actions = actions,
Verifications = verifications,
ExpectingException = true,
WithLocalState = false,
Is64 = false,
};
ParallelStateTest test = new ParallelStateTest(parameters);
test.RealRun();
}
[Fact]
[OuterLoop]
public static void ParallelState74()
{
string[] actions = new string[] { "SyncSetStop_0", "SyncWaitBreak_1", };
string[] verifications = new string[] { "ExceptionalVerification", };
TestParameters parameters = new TestParameters
{
Api = API.ForeachOnList,
Count = 10,
Actions = actions,
Verifications = verifications,
ExpectingException = true,
WithLocalState = true,
Is64 = false,
};
ParallelStateTest test = new ParallelStateTest(parameters);
test.RealRun();
}
[Fact]
[OuterLoop]
public static void ParallelState75()
{
string[] actions = new string[] { "SyncWaitBreak_0", "SyncSetBreak_1", };
string[] verifications = new string[] { "BreakVerification", };
TestParameters parameters = new TestParameters
{
Api = API.ForeachOnList,
Count = 10,
Actions = actions,
Verifications = verifications,
ExpectingException = false,
WithLocalState = false,
Is64 = false,
};
ParallelStateTest test = new ParallelStateTest(parameters);
test.RealRun();
}
[Fact]
[OuterLoop]
public static void ParallelState76()
{
string[] actions = new string[] { "SyncWaitBreak_0", "SyncSetBreak_1", };
string[] verifications = new string[] { "BreakVerification", };
TestParameters parameters = new TestParameters
{
Api = API.ForeachOnList,
Count = 10,
Actions = actions,
Verifications = verifications,
ExpectingException = false,
WithLocalState = true,
Is64 = false,
};
ParallelStateTest test = new ParallelStateTest(parameters);
test.RealRun();
}
}
}

./src/libraries/System.Reflection.Emit.ILGeneration/tests/CustomAttributeBuilderTests.cs
// Licensed to the .NET Foundation under one or more agreements.
// The .NET Foundation licenses this file to you under the MIT license.
using System;
using System.Collections.Generic;
using System.IO;
using System.Linq;
using Xunit;
namespace System.Reflection.Emit.Tests
{
public class CustomAttributeBuilderTests
{
public static IEnumerable<object[]> Ctor_TestData()
{
string stringValue1 = "TestString1";
string stringValue2 = "TestString2";
int intValue1 = 10;
int intValue2 = 20;
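
            // Each row matches the parameters of the Ctor test below:
            // (con, constructorArgs, propertyNames, propertyValues, expectedPropertyValues,
            //  fieldNames, fieldValues, expectedFieldValues); the expected arrays are compared
            // element-wise against TestAttribute.AllProperties / TestAttribute.AllFields.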
            // 2 ctor, 0 properties, 1 field
yield return new object[]
{
typeof(TestAttribute).GetConstructor(new Type[] { typeof(string), typeof(int) }), new object[] { stringValue1, intValue1 },
new string[0], new object[0],
new object[] { intValue2, null, stringValue1, intValue1 },
new string[] { nameof(TestAttribute.TestInt) }, new object[] { intValue2 },
new object[] { intValue2, null, stringValue1, intValue1 }
};
// 2 ctor, 0 properties, 0 fields
yield return new object[]
{
typeof(TestAttribute).GetConstructor(new Type[] { typeof(string), typeof(int) }), new object[] { stringValue1, intValue1 },
new string[0], new object[0],
new object[] { 0, null, stringValue1, intValue1 },
new string[0], new object[0],
new object[] { 0, null, stringValue1, intValue1 }
};
// 0 ctor, 0 properties, 0 fields
yield return new object[]
{
typeof(TestAttribute).GetConstructor(new Type[0]), new object[0],
new string[0], new object[0],
new object[] { 0, null, null, 0 },
new string[0], new object[0],
new object[] { 0, null, null, 0 }
};
// 0 ctor, 0 properties, 1 field
yield return new object[]
{
typeof(TestAttribute).GetConstructor(new Type[0]), new object[0],
new string[0], new object[0],
new object[] { intValue1, null, null, 0 },
new string[] { nameof(TestAttribute.TestInt) }, new object[] { intValue1 },
new object[] { intValue1, null, null, 0 }
};
// 0 ctor, 0 properties, 2 fields
yield return new object[]
{
typeof(TestAttribute).GetConstructor(new Type[0]), new object[0],
new string[0], new object[0],
new object[] { intValue1, stringValue1, null, 0 },
new string[] { nameof(TestAttribute.TestInt), nameof(TestAttribute.TestStringField) }, new object[] { intValue1, stringValue1 },
new object[] { intValue1, stringValue1, null, 0 }
};
// 2 ctor, 0 properties, 2 fields
yield return new object[]
{
typeof(TestAttribute).GetConstructor(new Type[] { typeof(string), typeof(int) }), new object[] { stringValue1, intValue1 },
new string[0], new object[0],
new object[] { intValue2, stringValue2, stringValue1, intValue1 },
new string[] { nameof(TestAttribute.TestInt), nameof(TestAttribute.TestStringField) }, new object[] { intValue2, stringValue2 },
new object[] { intValue2, stringValue2, stringValue1, intValue1 }
};
            // 0 ctor, 0 properties, 1 field
yield return new object[]
{
typeof(TestAttribute).GetConstructor(new Type[0]), new object[0],
new string[0], new object[0],
new object[] { 0, stringValue1, null, 0 },
new string[] { nameof(TestAttribute.TestStringField) }, new object[] { stringValue1 },
new object[] { 0, stringValue1, null, 0 }
};
// 2 ctor, 2 properties, 0 fields
yield return new object[]
{
typeof(TestAttribute).GetConstructor(new Type[] { typeof(string), typeof(int) }), new object[] { stringValue1, intValue1 },
new string[] { nameof(TestAttribute.TestInt32), nameof(TestAttribute.TestString) }, new object[] { intValue2, stringValue2 },
new object[] { intValue2, stringValue2, stringValue1, intValue1 },
new object[0], new object[0],
new object[] { intValue2, stringValue2, stringValue1, intValue1 }
};
// 2 ctor, 1 property, 0 fields
yield return new object[]
{
typeof(TestAttribute).GetConstructor(new Type[] { typeof(string), typeof(int) }), new object[] { stringValue1, intValue1 },
new string[] { nameof(TestAttribute.TestInt32) }, new object[] { intValue2 },
new object[] { intValue2, null, stringValue1, intValue1 },
new object[0], new object[0],
new object[] { intValue2, null, stringValue1, intValue1 }
};
// 0 ctor, 1 property, 0 fields
yield return new object[]
{
typeof(TestAttribute).GetConstructor(new Type[0]), new object[0],
new string[] { nameof(TestAttribute.TestInt32) }, new object[] { intValue2 },
new object[] { intValue2, null, null, 0 },
new object[0], new object[0],
new object[] { intValue2, null, null, 0 }
};
// 0 ctor, 2 properties, 0 fields
yield return new object[]
{
typeof(TestAttribute).GetConstructor(new Type[0]), new object[0],
new string[] { nameof(TestAttribute.TestInt32), nameof(TestAttribute.TestString) }, new object[] { intValue2, stringValue2 },
new object[] { intValue2, stringValue2, null, 0 },
new object[0], new object[0],
new object[] { intValue2, stringValue2, null, 0 }
};
            // 4 ctor, 2 properties, 0 fields
yield return new object[]
{
typeof(TestAttribute).GetConstructor(new Type[] { typeof(string), typeof(int), typeof(string), typeof(int) }), new object[] { stringValue1, intValue1, stringValue1, intValue1 },
new string[] { nameof(TestAttribute.TestInt32), nameof(TestAttribute.TestString) }, new object[] { intValue2, stringValue2 },
new object[] { intValue2, stringValue2, stringValue1, intValue1 },
new string[0], new object[0],
new object[] { intValue2, stringValue2, stringValue1, intValue1 }
};
            // 2 ctor, 2 properties, 2 fields
yield return new object[]
{
typeof(TestAttribute).GetConstructor(new Type[] { typeof(string), typeof(int) }), new object[] { stringValue1, intValue1 },
new string[] { nameof(TestAttribute.TestInt32), nameof(TestAttribute.TestString) }, new object[] { intValue2, stringValue2 },
new object[] { intValue2, stringValue2, stringValue1, intValue1 },
new string[] { nameof(TestAttribute.TestInt), nameof(TestAttribute.TestStringField) }, new object[] { intValue2, stringValue2 },
new object[] { intValue2, stringValue2, stringValue1, intValue1 }
};
// 2 ctor, 1 property, 1 field
yield return new object[]
{
typeof(TestAttribute).GetConstructor(new Type[] { typeof(string), typeof(int) }), new object[] { stringValue1, intValue1 },
new string[] { nameof(TestAttribute.TestString) }, new object[] { stringValue2 },
new object[] { intValue2, stringValue2, stringValue1, intValue1 },
new string[] { nameof(TestAttribute.TestInt) }, new object[] { intValue2 },
new object[] { intValue2, stringValue2, stringValue1, intValue1 }
};
            // 0 ctor, 2 properties, 1 field
yield return new object[]
{
typeof(TestAttribute).GetConstructor(new Type[0]), new object[0],
new string[] { nameof(TestAttribute.TestInt32), nameof(TestAttribute.TestString) }, new object[] { intValue1, stringValue1 },
new object[] { intValue2, stringValue1, null, 0 },
new string[] { nameof(TestAttribute.TestInt) }, new object[] { intValue2 },
new object[] { intValue2, stringValue1, null, 0 }
};
            // 2 ctor, 1 property, 0 fields
string shortString = new string('a', 128);
string longString = new string('a', 16384);
yield return new object[]
{
typeof(TestAttribute).GetConstructor(new Type[] { typeof(string), typeof(int) }), new object[] { shortString, intValue1 },
new string[] { nameof(TestAttribute.TestString) }, new object[] { longString },
new object[] { 0, longString, shortString, intValue1 },
new string[0], new object[0],
new object[] { 0, longString, shortString, intValue1 }
};
// 0 ctor, 1 property, 1 field
yield return new object[]
{
typeof(SubAttribute).GetConstructor(new Type[0]), new object[0],
new string[] { nameof(TestAttribute.TestString) }, new object[] { stringValue1 },
new object[] { intValue1, stringValue1, null, 0 },
new string[] { nameof(TestAttribute.TestInt) }, new object[] { intValue1 },
new object[] { intValue1, stringValue1, null, 0 }
};
}
[Theory]
[MemberData(nameof(Ctor_TestData))]
public static void Ctor(ConstructorInfo con, object[] constructorArgs,
string[] propertyNames, object[] propertyValues,
object[] expectedPropertyValues,
string[] fieldNames, object[] fieldValues,
object[] expectedFieldValues)
{
PropertyInfo[] namedProperties = Helpers.GetProperties(typeof(TestAttribute), propertyNames);
FieldInfo[] namedFields = Helpers.GetFields(typeof(TestAttribute), fieldNames);
void Verify(CustomAttributeBuilder attr)
{
VerifyCustomAttributeBuilder(attr, TestAttribute.AllProperties, expectedPropertyValues, TestAttribute.AllFields, expectedFieldValues);
}
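
            // Exercise every CustomAttributeBuilder overload that this combination of named
            // properties and fields can legally use; each one must round-trip to the same values.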
if (namedProperties.Length == 0)
{
if (namedFields.Length == 0)
{
// Use CustomAttributeBuilder(ConstructorInfo, object[])
CustomAttributeBuilder attribute1 = new CustomAttributeBuilder(con, constructorArgs);
Verify(attribute1);
}
// Use CustomAttributeBuilder(ConstructorInfo, object[], FieldInfo[], object[])
CustomAttributeBuilder attribute2 = new CustomAttributeBuilder(con, constructorArgs, namedFields, fieldValues);
Verify(attribute2);
}
if (namedFields.Length == 0)
{
// Use CustomAttributeBuilder(ConstructorInfo, object[], PropertyInfo[], object[])
CustomAttributeBuilder attribute3 = new CustomAttributeBuilder(con, constructorArgs, namedProperties, propertyValues);
Verify(attribute3);
}
// Use CustomAttributeBuilder(ConstructorInfo, object[], PropertyInfo[], object[], FieldInfo[], object[])
CustomAttributeBuilder attribute4 = new CustomAttributeBuilder(con, constructorArgs, namedProperties, propertyValues, namedFields, fieldValues);
Verify(attribute4);
}
private static void VerifyCustomAttributeBuilder(CustomAttributeBuilder builder,
PropertyInfo[] propertyNames, object[] propertyValues,
FieldInfo[] fieldNames, object[] fieldValues)
{
AssemblyBuilder assembly = Helpers.DynamicAssembly();
assembly.SetCustomAttribute(builder);
object[] customAttributes = assembly.GetCustomAttributes().ToArray();
Assert.Equal(1, customAttributes.Length);
object customAttribute = customAttributes[0];
for (int i = 0; i < fieldNames.Length; ++i)
{
FieldInfo field = typeof(TestAttribute).GetField(fieldNames[i].Name);
Assert.Equal(fieldValues[i], field.GetValue(customAttribute));
}
for (int i = 0; i < propertyNames.Length; ++i)
{
PropertyInfo property = typeof(TestAttribute).GetProperty(propertyNames[i].Name);
Assert.Equal(propertyValues[i], property.GetValue(customAttribute));
}
}
[Fact]
public static void Ctor_AllPrimitives()
{
ConstructorInfo con = typeof(Primitives).GetConstructors()[0];
object[] constructorArgs = new object[]
{
(sbyte)1, (byte)2, (short)3, (ushort)4, 5, (uint)6, (long)7, (ulong)8,
(SByteEnum)9, (ByteEnum)10, (ShortEnum)11, (UShortEnum)12, (IntEnum)13, (UIntEnum)14, (LongEnum)15, (ULongEnum)16,
(char)17, true, 2.0f, 2.1,
"abc", typeof(object), new int[] { 24, 25, 26 }, null
};
PropertyInfo[] namedProperties = Helpers.GetProperties(typeof(Primitives), new string[]
{
nameof(Primitives.SByteProperty), nameof(Primitives.ByteProperty), nameof(Primitives.ShortProperty), nameof(Primitives.UShortProperty), nameof(Primitives.IntProperty), nameof(Primitives.UIntProperty), nameof(Primitives.LongProperty), nameof(Primitives.ULongProperty),
nameof(Primitives.SByteEnumProperty), nameof(Primitives.ByteEnumProperty), nameof(Primitives.ShortEnumProperty), nameof(Primitives.UShortEnumProperty), nameof(Primitives.IntEnumProperty), nameof(Primitives.UIntEnumProperty), nameof(Primitives.LongEnumProperty), nameof(Primitives.ULongEnumProperty),
nameof(Primitives.CharProperty), nameof(Primitives.BoolProperty), nameof(Primitives.FloatProperty), nameof(Primitives.DoubleProperty),
nameof(Primitives.StringProperty), nameof(Primitives.TypeProperty), nameof(Primitives.ArrayProperty), nameof(Primitives.ObjectProperty)
});
object[] propertyValues = new object[]
{
(sbyte)27, (byte)28, (short)29, (ushort)30, 31, (uint)32, (long)33, (ulong)34,
(SByteEnum)35, (ByteEnum)36, (ShortEnum)37, (UShortEnum)38, (IntEnum)39, (UIntEnum)40, (LongEnum)41, (ULongEnum)42,
(char)43, false, 4.4f, 4.5,
"def", typeof(bool), new int[] { 48, 49, 50 }, "stringAsObject"
};
FieldInfo[] namedFields = Helpers.GetFields(typeof(Primitives), new string[]
{
nameof(Primitives.SByteField), nameof(Primitives.ByteField), nameof(Primitives.ShortField), nameof(Primitives.UShortField), nameof(Primitives.IntField), nameof(Primitives.UIntField), nameof(Primitives.LongField), nameof(Primitives.ULongField),
nameof(Primitives.SByteEnumField), nameof(Primitives.ByteEnumField), nameof(Primitives.ShortEnumField), nameof(Primitives.UShortEnumField), nameof(Primitives.IntEnumField), nameof(Primitives.UIntEnumField), nameof(Primitives.LongEnumField), nameof(Primitives.ULongEnumField),
nameof(Primitives.CharField), nameof(Primitives.BoolField), nameof(Primitives.FloatField), nameof(Primitives.DoubleField),
nameof(Primitives.StringField), nameof(Primitives.TypeField), nameof(Primitives.ArrayField), nameof(Primitives.ObjectField)
});
object[] fieldValues = new object[]
{
(sbyte)51, (byte)52, (short)53, (ushort)54, 55, (uint)56, (long)57, (ulong)58,
(SByteEnum)59, (ByteEnum)60, (ShortEnum)61, (UShortEnum)62, (IntEnum)63, (UIntEnum)64, (LongEnum)65, (ULongEnum)66,
(char)67, true, 6.8f, 6.9,
null, null, null, 70
};
CustomAttributeBuilder attributeBuilder = new CustomAttributeBuilder(con, constructorArgs, namedProperties, propertyValues, namedFields, fieldValues);
AssemblyBuilder assembly = Helpers.DynamicAssembly();
assembly.SetCustomAttribute(attributeBuilder);
object[] customAttributes = assembly.GetCustomAttributes().ToArray();
Assert.Equal(1, customAttributes.Length);
Primitives attribute = (Primitives)customAttributes[0];
// Constructor: primitives
Assert.Equal(constructorArgs[0], attribute.SByteConstructor);
Assert.Equal(constructorArgs[1], attribute.ByteConstructor);
Assert.Equal(constructorArgs[2], attribute.ShortConstructor);
Assert.Equal(constructorArgs[3], attribute.UShortConstructor);
Assert.Equal(constructorArgs[4], attribute.IntConstructor);
Assert.Equal(constructorArgs[5], attribute.UIntConstructor);
Assert.Equal(constructorArgs[6], attribute.LongConstructor);
Assert.Equal(constructorArgs[7], attribute.ULongConstructor);
// Constructors: enums
Assert.Equal(constructorArgs[8], attribute.SByteEnumConstructor);
Assert.Equal(constructorArgs[9], attribute.ByteEnumConstructor);
Assert.Equal(constructorArgs[10], attribute.ShortEnumConstructor);
Assert.Equal(constructorArgs[11], attribute.UShortEnumConstructor);
Assert.Equal(constructorArgs[12], attribute.IntEnumConstructor);
Assert.Equal(constructorArgs[13], attribute.UIntEnumConstructor);
Assert.Equal(constructorArgs[14], attribute.LongEnumConstructor);
Assert.Equal(constructorArgs[15], attribute.ULongEnumConstructor);
// Constructors: other primitives
Assert.Equal(constructorArgs[16], attribute.CharConstructor);
Assert.Equal(constructorArgs[17], attribute.BoolConstructor);
Assert.Equal(constructorArgs[18], attribute.FloatConstructor);
Assert.Equal(constructorArgs[19], attribute.DoubleConstructor);
// Constructors: misc
Assert.Equal(constructorArgs[20], attribute.StringConstructor);
Assert.Equal(constructorArgs[21], attribute.TypeConstructor);
Assert.Equal(constructorArgs[22], attribute.ArrayConstructor);
Assert.Equal(constructorArgs[23], attribute.ObjectConstructor);
// Field: primitives
Assert.Equal(fieldValues[0], attribute.SByteField);
Assert.Equal(fieldValues[1], attribute.ByteField);
Assert.Equal(fieldValues[2], attribute.ShortField);
Assert.Equal(fieldValues[3], attribute.UShortField);
Assert.Equal(fieldValues[4], attribute.IntField);
Assert.Equal(fieldValues[5], attribute.UIntField);
Assert.Equal(fieldValues[6], attribute.LongField);
Assert.Equal(fieldValues[7], attribute.ULongField);
// Fields: enums
Assert.Equal(fieldValues[8], attribute.SByteEnumField);
Assert.Equal(fieldValues[9], attribute.ByteEnumField);
Assert.Equal(fieldValues[10], attribute.ShortEnumField);
Assert.Equal(fieldValues[11], attribute.UShortEnumField);
Assert.Equal(fieldValues[12], attribute.IntEnumField);
Assert.Equal(fieldValues[13], attribute.UIntEnumField);
Assert.Equal(fieldValues[14], attribute.LongEnumField);
Assert.Equal(fieldValues[15], attribute.ULongEnumField);
// Fields: other primitives
Assert.Equal(fieldValues[16], attribute.CharField);
Assert.Equal(fieldValues[17], attribute.BoolField);
Assert.Equal(fieldValues[18], attribute.FloatField);
Assert.Equal(fieldValues[19], attribute.DoubleField);
// Fields: misc
Assert.Equal(fieldValues[20], attribute.StringField);
Assert.Equal(fieldValues[21], attribute.TypeField);
Assert.Equal(fieldValues[22], attribute.ArrayField);
Assert.Equal(fieldValues[23], attribute.ObjectField);
// Properties: primitives
Assert.Equal(propertyValues[0], attribute.SByteProperty);
Assert.Equal(propertyValues[1], attribute.ByteProperty);
Assert.Equal(propertyValues[2], attribute.ShortProperty);
Assert.Equal(propertyValues[3], attribute.UShortProperty);
Assert.Equal(propertyValues[4], attribute.IntProperty);
Assert.Equal(propertyValues[5], attribute.UIntProperty);
Assert.Equal(propertyValues[6], attribute.LongProperty);
Assert.Equal(propertyValues[7], attribute.ULongProperty);
// Properties: enums
Assert.Equal(propertyValues[8], attribute.SByteEnumProperty);
Assert.Equal(propertyValues[9], attribute.ByteEnumProperty);
Assert.Equal(propertyValues[10], attribute.ShortEnumProperty);
Assert.Equal(propertyValues[11], attribute.UShortEnumProperty);
Assert.Equal(propertyValues[12], attribute.IntEnumProperty);
Assert.Equal(propertyValues[13], attribute.UIntEnumProperty);
Assert.Equal(propertyValues[14], attribute.LongEnumProperty);
Assert.Equal(propertyValues[15], attribute.ULongEnumProperty);
// Properties: other primitives
Assert.Equal(propertyValues[16], attribute.CharProperty);
Assert.Equal(propertyValues[17], attribute.BoolProperty);
Assert.Equal(propertyValues[18], attribute.FloatProperty);
Assert.Equal(propertyValues[19], attribute.DoubleProperty);
// Properties: misc
Assert.Equal(propertyValues[20], attribute.StringProperty);
Assert.Equal(propertyValues[21], attribute.TypeProperty);
Assert.Equal(propertyValues[22], attribute.ArrayProperty);
Assert.Equal(propertyValues[23], attribute.ObjectProperty);
}
public static IEnumerable<object[]> Ctor_RefEmitParameters_TestData()
{
AssemblyBuilder assemblyBuilder = Helpers.DynamicAssembly();
TypeBuilder typeBuilder = assemblyBuilder.DefineDynamicModule("DynamicModule").DefineType("DynamicType", TypeAttributes.Public, typeof(Attribute));
ConstructorBuilder constructorBuilder = typeBuilder.DefineConstructor(MethodAttributes.Public, CallingConventions.Standard, new Type[0]);
constructorBuilder.GetILGenerator().Emit(OpCodes.Ret);
FieldBuilder fieldBuilder = typeBuilder.DefineField("Field", typeof(int), FieldAttributes.Public);
FieldBuilder fieldBuilderProperty = typeBuilder.DefineField("PropertyField", typeof(int), FieldAttributes.Public);
PropertyBuilder propertyBuilder = typeBuilder.DefineProperty("Property", PropertyAttributes.None, typeof(int), new Type[0]);
MethodBuilder setMethod = typeBuilder.DefineMethod("set_Property", MethodAttributes.Public, typeof(void), new Type[] { typeof(int) });
ILGenerator setMethodGenerator = setMethod.GetILGenerator();
setMethodGenerator.Emit(OpCodes.Ldarg_0);
setMethodGenerator.Emit(OpCodes.Ldarg_1);
setMethodGenerator.Emit(OpCodes.Stfld, fieldBuilderProperty);
setMethodGenerator.Emit(OpCodes.Ret);
propertyBuilder.SetSetMethod(setMethod);
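            // The emitted setter is equivalent to "set { PropertyField = value; }", so a value
            // assigned through the Property named argument can later be read back from PropertyField.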
Type createdType = typeBuilder.CreateTypeInfo().AsType();
// ConstructorBuilder, PropertyInfo, FieldInfo
yield return new object[]
{
constructorBuilder, new object[0],
new PropertyInfo[] { createdType.GetProperty(propertyBuilder.Name) }, new object[] { 1 },
new FieldInfo[] { createdType.GetField(fieldBuilder.Name) }, new object[] { 2 }
};
// ConstructorInfo, PropertyBuilder, FieldBuilder
yield return new object[]
{
createdType.GetConstructor(new Type[0]), new object[0],
new PropertyInfo[] { propertyBuilder }, new object[] { 1 },
new FieldInfo[] { fieldBuilder }, new object[] { 2 }
};
// ConstructorBuilder, PropertyBuilder, FieldBuilder
yield return new object[]
{
constructorBuilder, new object[0],
new PropertyInfo[] { propertyBuilder }, new object[] { 1 },
new FieldInfo[] { fieldBuilder }, new object[] { 2 }
};
}
[Theory]
[ActiveIssue("https://github.com/dotnet/runtime/issues/2383", TestRuntimes.Mono)]
[MemberData(nameof(Ctor_RefEmitParameters_TestData))]
public static void Ctor_RefEmitParameters(ConstructorInfo con, object[] constructorArgs,
PropertyInfo[] namedProperties, object[] propertyValues,
FieldInfo[] namedFields, object[] fieldValues)
{
CustomAttributeBuilder attribute = new CustomAttributeBuilder(con, constructorArgs, namedProperties, propertyValues, namedFields, fieldValues);
AssemblyBuilder assembly = Helpers.DynamicAssembly();
assembly.SetCustomAttribute(attribute);
object createdAttribute = assembly.GetCustomAttributes().First();
Assert.Equal(propertyValues[0], createdAttribute.GetType().GetField("PropertyField").GetValue(createdAttribute));
Assert.Equal(fieldValues[0], createdAttribute.GetType().GetField("Field").GetValue(createdAttribute));
}
[Theory]
[ActiveIssue("https://github.com/dotnet/runtime/issues/2383", TestRuntimes.Mono)]
[InlineData(nameof(TestAttribute.ReadonlyField))]
[InlineData(nameof(TestAttribute.StaticField))]
[InlineData(nameof(TestAttribute.StaticReadonlyField))]
public void NamedFields_ContainsReadonlyOrStaticField_Works(string name)
{
ConstructorInfo con = typeof(TestAttribute).GetConstructor(new Type[0]);
FieldInfo[] namedFields = new FieldInfo[] { typeof(TestAttribute).GetField(name) };
object[] fieldValues = new object[] { 5 };
CustomAttributeBuilder attribute = new CustomAttributeBuilder(con, new object[0], namedFields, fieldValues);
AssemblyBuilder assembly = Helpers.DynamicAssembly();
assembly.SetCustomAttribute(attribute);
object customAttribute = assembly.GetCustomAttributes().First();
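            // Static fields are read from the type itself (null instance); instance fields from the created attribute.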
Assert.Equal(fieldValues[0], namedFields[0].GetValue(namedFields[0].IsStatic ? null : customAttribute));
}
[Fact]
public void NamedProperties_StaticProperty_Works()
{
ConstructorInfo con = typeof(TestAttribute).GetConstructor(new Type[0]);
PropertyInfo[] namedProperties = new PropertyInfo[] { typeof(TestAttribute).GetProperty(nameof(TestAttribute.StaticProperty)) };
object[] propertyValues = new object[] { 5 };
CustomAttributeBuilder attribute = new CustomAttributeBuilder(con, new object[0], namedProperties, propertyValues);
AssemblyBuilder assembly = Helpers.DynamicAssembly();
assembly.SetCustomAttribute(attribute);
object customAttribute = assembly.GetCustomAttributes().First();
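            // Materializing the attribute runs the static setter, so the value is observed on the type itself.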
Assert.Equal(propertyValues[0], TestAttribute.StaticProperty);
}
[Theory]
[ActiveIssue("https://github.com/dotnet/runtime/issues/2383", TestRuntimes.Mono)]
[InlineData(typeof(PrivateAttribute))]
[InlineData(typeof(NotAnAttribute))]
public static void ClassNotSupportedAsAttribute_DoesNotThrow_DoesNotSet(Type type)
{
ConstructorInfo con = type.GetConstructor(new Type[0]);
CustomAttributeBuilder attribute = new CustomAttributeBuilder(con, new object[0]);
AssemblyBuilder assembly = Helpers.DynamicAssembly();
assembly.SetCustomAttribute(attribute);
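            // Building and applying the blob succeeds; the unsupported type is simply not surfaced by GetCustomAttributes.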
Assert.Empty(assembly.GetCustomAttributes());
}
[Fact]
public static void NullConstructor_ThrowsArgumentNullException()
{
AssertExtensions.Throws<ArgumentNullException>("con", () => new CustomAttributeBuilder(null, new object[0]));
AssertExtensions.Throws<ArgumentNullException>("con", () => new CustomAttributeBuilder(null, new object[0], new FieldInfo[0], new object[0]));
AssertExtensions.Throws<ArgumentNullException>("con", () => new CustomAttributeBuilder(null, new object[0], new PropertyInfo[0], new object[0]));
AssertExtensions.Throws<ArgumentNullException>("con", () => new CustomAttributeBuilder(null, new object[0], new PropertyInfo[0], new object[0], new FieldInfo[0], new object[0]));
}
[Fact]
public static void StaticConstructor_ThrowsArgumentException()
{
ConstructorInfo con = typeof(TestAttribute).GetConstructors(BindingFlags.Public | BindingFlags.NonPublic | BindingFlags.Static).First();
AssertExtensions.Throws<ArgumentException>(null, () => new CustomAttributeBuilder(con, new object[0]));
AssertExtensions.Throws<ArgumentException>(null, () => new CustomAttributeBuilder(con, new object[0], new FieldInfo[0], new object[0]));
AssertExtensions.Throws<ArgumentException>(null, () => new CustomAttributeBuilder(con, new object[0], new PropertyInfo[0], new object[0]));
AssertExtensions.Throws<ArgumentException>(null, () => new CustomAttributeBuilder(con, new object[0], new PropertyInfo[0], new object[0], new FieldInfo[0], new object[0]));
}
[Fact]
public static void PrivateConstructor_ThrowsArgumentException()
{
ConstructorInfo con = typeof(TestAttribute).GetConstructors(BindingFlags.NonPublic | BindingFlags.Instance).First();
AssertExtensions.Throws<ArgumentException>(null, () => new CustomAttributeBuilder(con, new object[0]));
AssertExtensions.Throws<ArgumentException>(null, () => new CustomAttributeBuilder(con, new object[0], new FieldInfo[0], new object[0]));
AssertExtensions.Throws<ArgumentException>(null, () => new CustomAttributeBuilder(con, new object[0], new PropertyInfo[0], new object[0]));
AssertExtensions.Throws<ArgumentException>(null, () => new CustomAttributeBuilder(con, new object[0], new PropertyInfo[0], new object[0], new FieldInfo[0], new object[0]));
}
[Theory]
[ActiveIssue("https://github.com/dotnet/runtime/issues/2383", TestRuntimes.Mono)]
[InlineData(CallingConventions.Any)]
[InlineData(CallingConventions.VarArgs)]
public static void ConstructorHasNonStandardCallingConvention_ThrowsArgumentException(CallingConventions callingConvention)
{
TypeBuilder typeBuilder = Helpers.DynamicType(TypeAttributes.Public);
ConstructorBuilder constructorBuilder = typeBuilder.DefineConstructor(MethodAttributes.Public, callingConvention, new Type[0]);
constructorBuilder.GetILGenerator().Emit(OpCodes.Ret);
ConstructorInfo con = typeBuilder.CreateTypeInfo().AsType().GetConstructor(new Type[0]);
AssertExtensions.Throws<ArgumentException>(null, () => new CustomAttributeBuilder(con, new object[0]));
AssertExtensions.Throws<ArgumentException>(null, () => new CustomAttributeBuilder(con, new object[0], new FieldInfo[0], new object[0]));
AssertExtensions.Throws<ArgumentException>(null, () => new CustomAttributeBuilder(con, new object[0], new PropertyInfo[0], new object[0]));
AssertExtensions.Throws<ArgumentException>(null, () => new CustomAttributeBuilder(con, new object[0], new PropertyInfo[0], new object[0], new FieldInfo[0], new object[0]));
}
[Fact]
public static void NullConstructorArgs_ThrowsArgumentNullException()
{
ConstructorInfo con = typeof(TestAttribute).GetConstructor(new Type[] { typeof(int) });
AssertExtensions.Throws<ArgumentNullException>("constructorArgs", () => new CustomAttributeBuilder(con, null));
AssertExtensions.Throws<ArgumentNullException>("constructorArgs", () => new CustomAttributeBuilder(con, null, new FieldInfo[0], new object[0]));
AssertExtensions.Throws<ArgumentNullException>("constructorArgs", () => new CustomAttributeBuilder(con, null, new PropertyInfo[0], new object[0]));
AssertExtensions.Throws<ArgumentNullException>("constructorArgs", () => new CustomAttributeBuilder(con, null, new PropertyInfo[0], new object[0], new FieldInfo[0], new object[0]));
}
public static IEnumerable<object[]> NotSupportedObject_Constructor_TestData()
{
yield return new object[] { new int[0, 0] };
yield return new object[] { Enum.GetValues(CreateEnum(typeof(char), 'a')).GetValue(0) };
yield return new object[] { Enum.GetValues(CreateEnum(typeof(bool), true)).GetValue(0) };
}
public static IEnumerable<object[]> FloatEnum_DoubleEnum_TestData()
{
yield return new object[] { Enum.GetValues(CreateEnum(typeof(float), 0.0f)).GetValue(0) };
yield return new object[] { Enum.GetValues(CreateEnum(typeof(double), 0.0)).GetValue(0) };
}
public static IEnumerable<object[]> NotSupportedObject_Others_TestData()
{
yield return new object[] { new Guid() };
yield return new object[] { new int[5, 5] };
}
[Theory]
[SkipOnTargetFramework(TargetFrameworkMonikers.NetFramework, "Netfx doesn't support Enum.GetEnumName for float or double enums.")]
[MemberData(nameof(FloatEnum_DoubleEnum_TestData))]
public void ConstructorArgsContainsFloatEnumOrDoubleEnum_ThrowsArgumentException(object value)
{
NotSupportedObjectInConstructorArgs_ThrowsArgumentException(value);
}
[Theory]
[MemberData(nameof(NotSupportedObject_Constructor_TestData))]
[MemberData(nameof(NotSupportedObject_Others_TestData))]
public static void NotSupportedObjectInConstructorArgs_ThrowsArgumentException(object value)
{
ConstructorInfo con = typeof(TestAttribute).GetConstructor(new Type[] { typeof(object) });
object[] constructorArgs = new object[] { value };
AssertExtensions.Throws<ArgumentException>(null, () => new CustomAttributeBuilder(con, new object[0]));
AssertExtensions.Throws<ArgumentException>(null, () => new CustomAttributeBuilder(con, new object[0], new PropertyInfo[0], new object[0]));
AssertExtensions.Throws<ArgumentException>(null, () => new CustomAttributeBuilder(con, new object[0], new FieldInfo[0], new object[0]));
AssertExtensions.Throws<ArgumentException>(null, () => new CustomAttributeBuilder(con, new object[0], new PropertyInfo[0], new object[0], new FieldInfo[0], new object[0]));
}
[Theory]
[InlineData(new Type[] { typeof(int) }, new object[] { 123, false })]
[InlineData(new Type[] { typeof(int), typeof(bool) }, new object[] { false, 123 })]
[InlineData(new Type[] { typeof(string), typeof(int), typeof(string), typeof(int) }, new object[] { "TestString", 10 })]
public void ConstructorAndConstructorArgsDontMatch_ThrowsArgumentException(Type[] constructorTypes, object[] constructorArgs)
{
ConstructorInfo con = typeof(TestAttribute).GetConstructor(constructorTypes);
AssertExtensions.Throws<ArgumentException>(null, () => new CustomAttributeBuilder(con, constructorArgs));
AssertExtensions.Throws<ArgumentException>(null, () => new CustomAttributeBuilder(con, constructorArgs, new FieldInfo[0], new object[0]));
AssertExtensions.Throws<ArgumentException>(null, () => new CustomAttributeBuilder(con, constructorArgs, new PropertyInfo[0], new object[0]));
AssertExtensions.Throws<ArgumentException>(null, () => new CustomAttributeBuilder(con, constructorArgs, new PropertyInfo[0], new object[0], new FieldInfo[0], new object[0]));
}
public static IEnumerable<object[]> IntPtrAttributeTypes_TestData()
{
yield return new object[] { typeof(IntPtr), (IntPtr)1 };
yield return new object[] { typeof(UIntPtr), (UIntPtr)1 };
}
public static IEnumerable<object[]> InvalidAttributeTypes_TestData()
{
yield return new object[] { typeof(Guid), new Guid() };
yield return new object[] { typeof(int[,]), new int[5, 5] };
yield return new object[] { CreateEnum(typeof(char), 'a'), 'a' };
yield return new object[] { CreateEnum(typeof(bool), false), true };
yield return new object[] { CreateEnum(typeof(float), 1.0f), 1.0f };
yield return new object[] { CreateEnum(typeof(double), 1.0), 1.0 };
yield return new object[] { CreateEnum(typeof(IntPtr)), (IntPtr)1 };
yield return new object[] { CreateEnum(typeof(UIntPtr)), (UIntPtr)1 };
}
[Theory]
[ActiveIssue("https://github.com/dotnet/runtime/issues/2383", TestRuntimes.Mono)]
[SkipOnTargetFramework(TargetFrameworkMonikers.NetFramework, "Coreclr fixed an issue where IntPtr/UIntPtr in constructorParameters causes a corrupt created binary.")]
[MemberData(nameof(IntPtrAttributeTypes_TestData))]
public void ConstructorParametersContainsIntPtrOrUIntPtrArgument_ThrowsArgumentException(Type type, object value)
{
ConstructorParametersNotSupportedInAttributes_ThrowsArgumentException(type, value);
}
[Theory]
[ActiveIssue("https://github.com/dotnet/runtime/issues/2383", TestRuntimes.Mono)]
[MemberData(nameof(InvalidAttributeTypes_TestData))]
public void ConstructorParametersNotSupportedInAttributes_ThrowsArgumentException(Type type, object value)
{
TypeBuilder typeBuilder = Helpers.DynamicType(TypeAttributes.Public);
ConstructorInfo con = typeBuilder.DefineConstructor(MethodAttributes.Public, CallingConventions.Standard, new Type[] { type });
object[] constructorArgs = new object[] { value };
AssertExtensions.Throws<ArgumentException>(null, () => new CustomAttributeBuilder(con, constructorArgs));
AssertExtensions.Throws<ArgumentException>(null, () => new CustomAttributeBuilder(con, constructorArgs, new FieldInfo[0], new object[0]));
AssertExtensions.Throws<ArgumentException>(null, () => new CustomAttributeBuilder(con, constructorArgs, new PropertyInfo[0], new object[0]));
AssertExtensions.Throws<ArgumentException>(null, () => new CustomAttributeBuilder(con, constructorArgs, new PropertyInfo[0], new object[0], new FieldInfo[0], new object[0]));
}
[Fact]
[ActiveIssue("https://github.com/dotnet/runtime/issues/2383", TestRuntimes.Mono)]
[SkipOnTargetFramework(TargetFrameworkMonikers.NetFramework, "Used to throw a NullReferenceException, see https://github.com/dotnet/runtime/issues/18552.")]
public void NullValueForPrimitiveTypeInConstructorArgs_ThrowsArgumentNullException()
{
ConstructorInfo con = typeof(TestAttribute).GetConstructor(new Type[] { typeof(int) });
object[] constructorArgs = new object[] { null };
AssertExtensions.Throws<ArgumentNullException>("constructorArgs[0]", () => new CustomAttributeBuilder(con, constructorArgs));
AssertExtensions.Throws<ArgumentNullException>("constructorArgs[0]", () => new CustomAttributeBuilder(con, constructorArgs, new FieldInfo[0], new object[0]));
AssertExtensions.Throws<ArgumentNullException>("constructorArgs[0]", () => new CustomAttributeBuilder(con, constructorArgs, new PropertyInfo[0], new object[0]));
AssertExtensions.Throws<ArgumentNullException>("constructorArgs[0]", () => new CustomAttributeBuilder(con, constructorArgs, new PropertyInfo[0], new object[0], new FieldInfo[0], new object[0]));
}
public static IEnumerable<object[]> NotSupportedPrimitives_TestData()
{
yield return new object[] { (IntPtr)1 };
yield return new object[] { (UIntPtr)1 };
}
[Theory]
[ActiveIssue("https://github.com/dotnet/runtime/issues/2383", TestRuntimes.Mono)]
[SkipOnTargetFramework(TargetFrameworkMonikers.NetFramework, "Coreclr fixed an issue where IntPtr/UIntPtr in constructorArgs causes a corrupt created binary.")]
[MemberData(nameof(NotSupportedPrimitives_TestData))]
public static void NotSupportedPrimitiveInConstructorArgs_ThrowsArgumentException(object value)
{
ConstructorInfo con = typeof(TestAttribute).GetConstructor(new Type[] { typeof(object) });
object[] constructorArgs = new object[] { value };
AssertExtensions.Throws<ArgumentException>("constructorArgs[0]", () => new CustomAttributeBuilder(con, constructorArgs));
AssertExtensions.Throws<ArgumentException>("constructorArgs[0]", () => new CustomAttributeBuilder(con, constructorArgs, new FieldInfo[0], new object[0]));
AssertExtensions.Throws<ArgumentException>("constructorArgs[0]", () => new CustomAttributeBuilder(con, constructorArgs, new PropertyInfo[0], new object[0]));
AssertExtensions.Throws<ArgumentException>("constructorArgs[0]", () => new CustomAttributeBuilder(con, constructorArgs, new PropertyInfo[0], new object[0], new FieldInfo[0], new object[0]));
}
[Fact]
[ActiveIssue("https://github.com/dotnet/runtime/issues/2383", TestRuntimes.Mono)]
public static void DynamicTypeInConstructorArgs_ThrowsFileNotFoundExceptionOnCreation()
{
AssemblyBuilder assembly = Helpers.DynamicAssembly();
TypeBuilder type = assembly.DefineDynamicModule("DynamicModule").DefineType("DynamicType");
ConstructorInfo con = typeof(TestAttribute).GetConstructor(new Type[] { typeof(object) });
object[] constructorArgs = new object[] { type };
CustomAttributeBuilder attribute = new CustomAttributeBuilder(con, constructorArgs);
assembly.SetCustomAttribute(attribute);
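            // Decoding the blob has to resolve the serialized type name, and the transient
            // dynamic assembly it points at cannot be located on disk, which surfaces as FileNotFoundException.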
Assert.Throws<FileNotFoundException>(() => assembly.GetCustomAttributes());
}
[Fact]
public static void NullNamedFields_ThrowsArgumentNullException()
{
ConstructorInfo con = typeof(TestAttribute).GetConstructor(new Type[0]);
AssertExtensions.Throws<ArgumentNullException>("namedFields", () => new CustomAttributeBuilder(con, new object[0], (FieldInfo[])null, new object[0]));
AssertExtensions.Throws<ArgumentNullException>("namedFields", () => new CustomAttributeBuilder(con, new object[0], new PropertyInfo[0], new object[0], null, new object[0]));
}
[Theory]
[MemberData(nameof(InvalidAttributeTypes_TestData))]
public void NamedFields_FieldTypeNotSupportedInAttributes_ThrowsArgumentException(Type type, object value)
{
TypeBuilder typeBuilder = Helpers.DynamicType(TypeAttributes.Public);
FieldInfo field = typeBuilder.DefineField("Field", type, FieldAttributes.Public);
ConstructorInfo con = typeof(TestAttribute).GetConstructor(new Type[0]);
FieldInfo[] namedFields = new FieldInfo[] { field };
object[] fieldValues = new object[] { value };
AssertExtensions.Throws<ArgumentException>(null, () => new CustomAttributeBuilder(con, new object[0], namedFields, fieldValues));
AssertExtensions.Throws<ArgumentException>(null, () => new CustomAttributeBuilder(con, new object[0], new PropertyInfo[0], new object[0], namedFields, fieldValues));
}
public static IEnumerable<object[]> FieldDoesntBelongToConstructorDeclaringType_TestData()
{
// Different declaring type
yield return new object[] { typeof(TestAttribute).GetConstructor(new Type[0]), typeof(OtherTestAttribute).GetField(nameof(OtherTestAttribute.Field)) };
// Base class and sub class declaring types
yield return new object[] { typeof(TestAttribute).GetConstructor(new Type[0]), typeof(SubAttribute).GetField(nameof(SubAttribute.SubField)) };
}
[Theory]
[ActiveIssue("https://github.com/dotnet/runtime/issues/2383", TestRuntimes.Mono)]
[MemberData(nameof(FieldDoesntBelongToConstructorDeclaringType_TestData))]
public void NamedFields_FieldDoesntBelongToConstructorDeclaringType_ThrowsArgumentException(ConstructorInfo con, FieldInfo field)
{
FieldInfo[] namedFields = new FieldInfo[] { field };
AssertExtensions.Throws<ArgumentException>(null, () => new CustomAttributeBuilder(con, new object[0], namedFields, new object[] { 5 }));
AssertExtensions.Throws<ArgumentException>(null, () => new CustomAttributeBuilder(con, new object[0], new PropertyInfo[0], new object[0], namedFields, new object[] { 5 }));
}
[Fact]
[ActiveIssue("https://github.com/dotnet/runtime/issues/2383", TestRuntimes.Mono)]
public void NamedFields_ContainsConstField_ThrowsArgumentException()
{
ConstructorInfo con = typeof(TestAttribute).GetConstructor(new Type[0]);
FieldInfo[] namedFields = new FieldInfo[] { typeof(TestAttribute).GetField(nameof(TestAttribute.ConstField)) };
object[] propertyValues = new object[] { 5 };
CustomAttributeBuilder attribute = new CustomAttributeBuilder(con, new object[0], namedFields, propertyValues);
AssemblyBuilder assembly = Helpers.DynamicAssembly();
assembly.SetCustomAttribute(attribute);
// CustomAttributeFormatException is not exposed on .NET Core
Exception ex = Assert.ThrowsAny<Exception>(() => assembly.GetCustomAttributes());
Assert.Equal("System.Reflection.CustomAttributeFormatException", ex.GetType().ToString());
}
[Fact]
public static void NullFieldValues_ThrowsArgumentNullException()
{
ConstructorInfo con = typeof(TestAttribute).GetConstructor(new Type[0]);
AssertExtensions.Throws<ArgumentNullException>("fieldValues", () => new CustomAttributeBuilder(con, new object[0], new FieldInfo[0], null));
AssertExtensions.Throws<ArgumentNullException>("fieldValues", () => new CustomAttributeBuilder(con, new object[0], new PropertyInfo[0], new object[0], new FieldInfo[0], null));
}
[Fact]
[ActiveIssue("https://github.com/dotnet/runtime/issues/2383", TestRuntimes.Mono)]
public static void NullObjectInNamedFields_ThrowsArgumentNullException()
{
ConstructorInfo con = typeof(TestAttribute).GetConstructor(new Type[0]);
FieldInfo[] namedFields = new FieldInfo[] { null };
AssertExtensions.Throws<ArgumentNullException>("namedFields[0]", () => new CustomAttributeBuilder(con, new object[0], namedFields, new object[1]));
AssertExtensions.Throws<ArgumentNullException>("namedFields[0]", () => new CustomAttributeBuilder(con, new object[0], new PropertyInfo[0], new object[0], namedFields, new object[1]));
}
[Fact]
[ActiveIssue("https://github.com/dotnet/runtime/issues/2383", TestRuntimes.Mono)]
public static void NullObjectInFieldValues_ThrowsArgumentNullException()
{
ConstructorInfo con = typeof(TestAttribute).GetConstructor(new Type[0]);
FieldInfo[] namedFields = Helpers.GetFields(typeof(TestAttribute), nameof(TestAttribute.TestInt));
object[] fieldValues = new object[] { null };
AssertExtensions.Throws<ArgumentNullException>("fieldValues[0]", () => new CustomAttributeBuilder(con, new object[0], namedFields, fieldValues));
AssertExtensions.Throws<ArgumentNullException>("fieldValues[0]", () => new CustomAttributeBuilder(con, new object[0], new PropertyInfo[0], new object[0], namedFields, fieldValues));
}
[Theory]
[ActiveIssue("https://github.com/dotnet/runtime/issues/2383", TestRuntimes.Mono)]
[MemberData(nameof(NotSupportedObject_Others_TestData))]
public static void NotSupportedObjectInFieldValues_ThrowsArgumentException(object value)
{
ConstructorInfo con = typeof(TestAttribute).GetConstructor(new Type[0]);
FieldInfo[] namedFields = Helpers.GetFields(typeof(TestAttribute), nameof(TestAttribute.ObjectField));
object[] fieldValues = new object[] { value };
AssertExtensions.Throws<ArgumentException>(null, () => new CustomAttributeBuilder(con, new object[0], namedFields, fieldValues));
AssertExtensions.Throws<ArgumentException>(null, () => new CustomAttributeBuilder(con, new object[0], new PropertyInfo[0], new object[0], namedFields, fieldValues));
}
[Fact]
[ActiveIssue("https://github.com/dotnet/runtime/issues/2383", TestRuntimes.Mono)]
public static void ZeroCountMultidimensionalArrayInFieldValues_ChangesToZeroCountJaggedArray()
{
ConstructorInfo con = typeof(TestAttribute).GetConstructor(new Type[0]);
FieldInfo[] namedFields = Helpers.GetFields(typeof(TestAttribute), nameof(TestAttribute.ObjectField));
object[] fieldValues = new object[] { new int[0, 0] };
CustomAttributeBuilder attribute = new CustomAttributeBuilder(con, new object[0], namedFields, fieldValues);
AssemblyBuilder assembly = Helpers.DynamicAssembly();
assembly.SetCustomAttribute(attribute);
TestAttribute customAttribute = (TestAttribute)assembly.GetCustomAttributes().First();
Array objectField = (Array)customAttribute.ObjectField;
Assert.IsType<int[]>(objectField);
Assert.Equal(0, objectField.Length);
}
[Theory]
[ActiveIssue("https://github.com/dotnet/runtime/issues/2383", TestRuntimes.Mono)]
[MemberData(nameof(NotSupportedPrimitives_TestData))]
[SkipOnTargetFramework(TargetFrameworkMonikers.NetFramework, "Coreclr fixed an issue where IntPtr/UIntPtr in fieldValues causes a corrupt created binary.")]
public static void NotSupportedPrimitiveInFieldValues_ThrowsArgumentException(object value)
{
// Used to assert in CustomAttributeBuilder.EmitType(), not writing any CustomAttributeEncoding.
// This created a blob that (probably) generates a CustomAttributeFormatException. In theory, this
// could have been something more uncontrolled, so was fixed. See https://github.com/dotnet/runtime/issues/18553.
ConstructorInfo con = typeof(TestAttribute).GetConstructor(new Type[0]);
FieldInfo[] namedFields = Helpers.GetFields(typeof(TestAttribute), nameof(TestAttribute.ObjectField));
object[] fieldValues = new object[] { value };
AssertExtensions.Throws<ArgumentException>("fieldValues[0]", () => new CustomAttributeBuilder(con, new object[0], namedFields, fieldValues));
AssertExtensions.Throws<ArgumentException>("fieldValues[0]", () => new CustomAttributeBuilder(con, new object[0], new PropertyInfo[0], new FieldInfo[0], namedFields, fieldValues));
}
[Fact]
[ActiveIssue("https://github.com/dotnet/runtime/issues/2383", TestRuntimes.Mono)]
public static void DynamicTypeInPropertyValues_ThrowsFileNotFoundExceptionOnCreation()
{
AssemblyBuilder assembly = Helpers.DynamicAssembly();
TypeBuilder type = assembly.DefineDynamicModule("DynamicModule").DefineType("DynamicType");
FieldInfo[] namedFields = Helpers.GetFields(typeof(TestAttribute), nameof(TestAttribute.ObjectField));
object[] fieldValues = new object[] { type };
ConstructorInfo con = typeof(TestAttribute).GetConstructor(new Type[0]);
CustomAttributeBuilder attribute = new CustomAttributeBuilder(con, new object[0], namedFields, fieldValues);
assembly.SetCustomAttribute(attribute);
Assert.Throws<FileNotFoundException>(() => assembly.GetCustomAttributes());
}
[Theory]
[InlineData(new string[] { nameof(TestAttribute.TestInt) }, new object[0], "namedFields, fieldValues")]
[InlineData(new string[] { nameof(TestAttribute.TestInt) }, new object[] { "TestString", 10 }, "namedFields, fieldValues")]
[InlineData(new string[] { nameof(TestAttribute.TestInt), nameof(TestAttribute.TestStringField) }, new object[] { "TestString", 10 }, null)]
[InlineData(new string[] { nameof(TestAttribute.TestStringField) }, new object[] { 10 }, null)]
public void NamedFieldAndFieldValuesDifferentLengths_ThrowsArgumentException(string[] fieldNames, object[] fieldValues, string paramName)
{
ConstructorInfo con = typeof(TestAttribute).GetConstructor(new Type[0]);
FieldInfo[] namedFields = Helpers.GetFields(typeof(TestAttribute), fieldNames);
AssertExtensions.Throws<ArgumentException>(paramName, () => new CustomAttributeBuilder(con, new object[0], namedFields, fieldValues));
AssertExtensions.Throws<ArgumentException>(paramName, () => new CustomAttributeBuilder(con, new object[0], new PropertyInfo[0], new object[0], namedFields, fieldValues));
}
[Fact]
public static void NullNamedProperties_ThrowsArgumentNullException()
{
ConstructorInfo con = typeof(TestAttribute).GetConstructor(new Type[0]);
AssertExtensions.Throws<ArgumentNullException>("namedProperties", () => new CustomAttributeBuilder(con, new object[0], (PropertyInfo[])null, new object[0]));
AssertExtensions.Throws<ArgumentNullException>("namedProperties", () => new CustomAttributeBuilder(con, new object[0], null, new object[0], new FieldInfo[0], new object[0]));
}
[Fact]
public static void NullPropertyValues_ThrowsArgumentNullException()
{
ConstructorInfo con = typeof(TestAttribute).GetConstructor(new Type[0]);
AssertExtensions.Throws<ArgumentNullException>("propertyValues", () => new CustomAttributeBuilder(con, new object[0], new PropertyInfo[0], null));
AssertExtensions.Throws<ArgumentNullException>("propertyValues", () => new CustomAttributeBuilder(con, new object[0], new PropertyInfo[0], null, new FieldInfo[0], new object[0]));
}
[Fact]
[ActiveIssue("https://github.com/dotnet/runtime/issues/2383", TestRuntimes.Mono)]
public static void NullObjectInNamedProperties_ThrowsArgumentNullException()
{
ConstructorInfo con = typeof(TestAttribute).GetConstructor(new Type[0]);
PropertyInfo[] namedProperties = new PropertyInfo[] { null };
AssertExtensions.Throws<ArgumentNullException>("namedProperties[0]", () => new CustomAttributeBuilder(con, new object[0], namedProperties, new object[1]));
AssertExtensions.Throws<ArgumentNullException>("namedProperties[0]", () => new CustomAttributeBuilder(con, new object[0], namedProperties, new object[1], new FieldInfo[0], new object[0]));
}
[Fact]
[ActiveIssue("https://github.com/dotnet/runtime/issues/2383", TestRuntimes.Mono)]
public static void IndexerInNamedProperties_ThrowsCustomAttributeFormatExceptionOnCreation()
{
ConstructorInfo con = typeof(IndexerAttribute).GetConstructor(new Type[0]);
PropertyInfo[] namedProperties = new PropertyInfo[] { typeof(IndexerAttribute).GetProperty("Item") };
CustomAttributeBuilder attribute = new CustomAttributeBuilder(con, new object[0], namedProperties, new object[] { "abc" });
AssemblyBuilder assembly = Helpers.DynamicAssembly();
assembly.SetCustomAttribute(attribute);
// CustomAttributeFormatException is not exposed on .NET Core
Exception ex = Assert.ThrowsAny<Exception>(() => assembly.GetCustomAttributes());
Assert.Equal("System.Reflection.CustomAttributeFormatException", ex.GetType().ToString());
}
[Theory]
[MemberData(nameof(InvalidAttributeTypes_TestData))]
[MemberData(nameof(IntPtrAttributeTypes_TestData))]
public void NamedProperties_TypeNotSupportedInAttributes_ThrowsArgumentException(Type type, object value)
{
TypeBuilder typeBuilder = Helpers.DynamicType(TypeAttributes.Public);
PropertyBuilder property = typeBuilder.DefineProperty("Property", PropertyAttributes.None, type, new Type[0]);
ConstructorInfo con = typeof(TestAttribute).GetConstructor(new Type[0]);
PropertyInfo[] namedProperties = new PropertyInfo[] { property };
object[] propertyValues = new object[] { value };
AssertExtensions.Throws<ArgumentException>(null, () => new CustomAttributeBuilder(con, new object[0], namedProperties, propertyValues));
AssertExtensions.Throws<ArgumentException>(null, () => new CustomAttributeBuilder(con, new object[0], namedProperties, propertyValues, new FieldInfo[0], new object[0]));
}
public static IEnumerable<object[]> PropertyDoesntBelongToConstructorDeclaringType_TestData()
{
// Different declaring type
yield return new object[] { typeof(TestAttribute).GetConstructor(new Type[0]), typeof(OtherTestAttribute).GetProperty(nameof(OtherTestAttribute.Property)) };
// Base class and sub class declaring types
yield return new object[] { typeof(TestAttribute).GetConstructor(new Type[0]), typeof(SubAttribute).GetProperty(nameof(SubAttribute.SubProperty)) };
}
[Theory]
[ActiveIssue("https://github.com/dotnet/runtime/issues/2383", TestRuntimes.Mono)]
[MemberData(nameof(PropertyDoesntBelongToConstructorDeclaringType_TestData))]
public void NamedProperties_PropertyDoesntBelongToConstructorDeclaringType_ThrowsArgumentException(ConstructorInfo con, PropertyInfo property)
{
PropertyInfo[] namedProperties = new PropertyInfo[] { property };
AssertExtensions.Throws<ArgumentException>(null, () => new CustomAttributeBuilder(con, new object[0], namedProperties, new object[] { 5 }));
AssertExtensions.Throws<ArgumentException>(null, () => new CustomAttributeBuilder(con, new object[0], namedProperties, new object[] { 5 }, new FieldInfo[0], new object[0]));
}
[Fact]
[ActiveIssue("https://github.com/dotnet/runtime/issues/2383", TestRuntimes.Mono)]
public static void NullObjectInPropertyValues_ThrowsArgumentNullException()
{
ConstructorInfo con = typeof(TestAttribute).GetConstructor(new Type[0]);
PropertyInfo[] namedProperties = Helpers.GetProperties(typeof(TestAttribute), nameof(TestAttribute.TestInt32));
object[] propertyValues = new object[] { null };
AssertExtensions.Throws<ArgumentNullException>("propertyValues[0]", () => new CustomAttributeBuilder(con, new object[0], namedProperties, propertyValues));
AssertExtensions.Throws<ArgumentNullException>("propertyValues[0]", () => new CustomAttributeBuilder(con, new object[0], namedProperties, propertyValues, new FieldInfo[0], new object[0]));
}
[Theory]
[ActiveIssue("https://github.com/dotnet/runtime/issues/2383", TestRuntimes.Mono)]
[MemberData(nameof(NotSupportedObject_Others_TestData))]
public static void NotSupportedObjectInPropertyValues_ThrowsArgumentException(object value)
{
ConstructorInfo con = typeof(TestAttribute).GetConstructor(new Type[0]);
PropertyInfo[] namedProperties = Helpers.GetProperties(typeof(TestAttribute), nameof(TestAttribute.ObjectProperty));
object[] propertyValues = new object[] { value };
AssertExtensions.Throws<ArgumentException>(null, () => new CustomAttributeBuilder(con, new object[0], namedProperties, propertyValues));
AssertExtensions.Throws<ArgumentException>(null, () => new CustomAttributeBuilder(con, new object[0], namedProperties, propertyValues, new FieldInfo[0], new object[0]));
}
[Fact]
[ActiveIssue("https://github.com/dotnet/runtime/issues/2383", TestRuntimes.Mono)]
public static void ZeroCountMultidimensionalArrayInPropertyValues_ChangesToZeroCountJaggedArray()
{
ConstructorInfo con = typeof(TestAttribute).GetConstructor(new Type[0]);
PropertyInfo[] namedProperties = Helpers.GetProperties(typeof(TestAttribute), nameof(TestAttribute.ObjectProperty));
object[] propertyValues = new object[] { new int[0, 0] };
CustomAttributeBuilder attribute = new CustomAttributeBuilder(con, new object[0], namedProperties, propertyValues);
AssemblyBuilder assembly = Helpers.DynamicAssembly();
assembly.SetCustomAttribute(attribute);
TestAttribute customAttribute = (TestAttribute)assembly.GetCustomAttributes().First();
Array objectProperty = (Array)customAttribute.ObjectProperty;
Assert.IsType<int[]>(objectProperty);
Assert.Equal(0, objectProperty.Length);
}
[Theory]
[ActiveIssue("https://github.com/dotnet/runtime/issues/2383", TestRuntimes.Mono)]
[MemberData(nameof(NotSupportedPrimitives_TestData))]
[SkipOnTargetFramework(TargetFrameworkMonikers.NetFramework, "Coreclr fixed an issue where IntPtr/UIntPtr in propertValues causes a corrupt created binary.")]
public static void NotSupportedPrimitiveInPropertyValues_ThrowsArgumentException(object value)
{
ConstructorInfo con = typeof(TestAttribute).GetConstructor(new Type[0]);
PropertyInfo[] namedProperties = Helpers.GetProperties(typeof(TestAttribute), nameof(TestAttribute.ObjectProperty));
object[] propertyValues = new object[] { value };
AssertExtensions.Throws<ArgumentException>("propertyValues[0]", () => new CustomAttributeBuilder(con, new object[0], namedProperties, propertyValues));
AssertExtensions.Throws<ArgumentException>("propertyValues[0]", () => new CustomAttributeBuilder(con, new object[0], namedProperties, propertyValues, new FieldInfo[0], new object[0]));
}
[Fact]
[ActiveIssue("https://github.com/dotnet/runtime/issues/2383", TestRuntimes.Mono)]
public static void DynamicTypeInFieldValues_ThrowsFileNotFoundExceptionOnCreation()
{
AssemblyBuilder assembly = Helpers.DynamicAssembly();
TypeBuilder type = assembly.DefineDynamicModule("DynamicModule").DefineType("DynamicType");
PropertyInfo[] namedProperties = Helpers.GetProperties(typeof(TestAttribute), nameof(TestAttribute.ObjectProperty));
object[] propertyValues = new object[] { type };
ConstructorInfo con = typeof(TestAttribute).GetConstructor(new Type[0]);
CustomAttributeBuilder attribute = new CustomAttributeBuilder(con, new object[0], namedProperties, propertyValues);
assembly.SetCustomAttribute(attribute);
Assert.Throws<FileNotFoundException>(() => assembly.GetCustomAttributes());
}
[Theory]
[InlineData(new string[] { nameof(TestAttribute.TestInt32) }, new object[0], "namedProperties, propertyValues")]
[InlineData(new string[0], new object[] { 10 }, "namedProperties, propertyValues")]
[InlineData(new string[] { nameof(TestAttribute.TestInt32), nameof(TestAttribute.TestString) }, new object[] { "TestString", 10 }, null)]
[InlineData(new string[] { nameof(TestAttribute.GetOnlyInt32) }, new object[] { "TestString" }, null)]
[InlineData(new string[] { nameof(TestAttribute.GetOnlyString) }, new object[] { "TestString" }, null)]
[InlineData(new string[] { nameof(TestAttribute.TestInt32) }, new object[] { "TestString" }, null)]
public void NamedPropertyAndPropertyValuesDifferentLengths_ThrowsArgumentException(string[] propertyNames, object[] propertyValues, string paramName)
{
ConstructorInfo con = typeof(TestAttribute).GetConstructor(new Type[0]);
PropertyInfo[] namedProperties = Helpers.GetProperties(typeof(TestAttribute), propertyNames);
AssertExtensions.Throws<ArgumentException>(paramName, () => new CustomAttributeBuilder(con, new object[0], namedProperties, propertyValues));
AssertExtensions.Throws<ArgumentException>(paramName, () => new CustomAttributeBuilder(con, new object[0], namedProperties, propertyValues, new FieldInfo[0], new object[0]));
}
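// Helper: emits a throwaway enum with the requested underlying type and literal values. Several tests
// use it to build enums whose underlying types (char, bool, float, double, IntPtr, UIntPtr) are not
// legal in a custom attribute blob.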
private static Type CreateEnum(Type underlyingType, params object[] literalValues)
{
ModuleBuilder module = Helpers.DynamicModule();
EnumBuilder enumBuilder = module.DefineEnum("Name", TypeAttributes.Public, underlyingType);
for (int i = 0; i < (literalValues?.Length ?? 0); i++)
{
enumBuilder.DefineLiteral("Value" + i, literalValues[i]);
}
return enumBuilder.CreateTypeInfo().AsType();
}
}
public class OtherTestAttribute : Attribute
{
public int Property { get; set; }
public int Field;
}
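// Deliberately unusable attribute types: a non-public attribute type and a type that does not derive from
// Attribute. SetCustomAttribute accepts their constructors, but the resulting blob is ignored when the
// attributes are read back (see ClassNotSupportedAsAttribute_DoesNotThrow_DoesNotSet).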
class PrivateAttribute : Attribute { }
public class NotAnAttribute { }
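// Attribute exposing every encodable kind of member (integral primitives, enums, char, bool, float, double,
// string, Type, int[] and object) as constructor parameters, writable properties, and public fields.
// Ctor_AllPrimitives uses it to round-trip all of them through a dynamic assembly.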
public class Primitives : Attribute
{
public Primitives(sbyte sb, byte b, short s, ushort us, int i, uint ui, long l, ulong ul,
SByteEnum sbe, ByteEnum be, ShortEnum se, UShortEnum use, IntEnum ie, UIntEnum uie, LongEnum le, ULongEnum ule,
char c, bool bo, float f, double d,
string str, Type t, int[] arr, object obj)
{
SByteConstructor = sb;
ByteConstructor = b;
ShortConstructor = s;
UShortConstructor = us;
IntConstructor = i;
UIntConstructor = ui;
LongConstructor = l;
ULongConstructor = ul;
SByteEnumConstructor = sbe;
ByteEnumConstructor = be;
ShortEnumConstructor = se;
UShortEnumConstructor = use;
IntEnumConstructor = ie;
UIntEnumConstructor = uie;
LongEnumConstructor = le;
ULongEnumConstructor = ule;
CharConstructor = c;
BoolConstructor = bo;
FloatConstructor = f;
DoubleConstructor = d;
StringConstructor = str;
TypeConstructor = t;
ArrayConstructor = arr;
ObjectConstructor = obj;
}
public sbyte SByteConstructor;
public byte ByteConstructor;
public short ShortConstructor;
public ushort UShortConstructor;
public int IntConstructor;
public uint UIntConstructor;
public long LongConstructor;
public ulong ULongConstructor;
public SByteEnum SByteEnumConstructor;
public ByteEnum ByteEnumConstructor;
public ShortEnum ShortEnumConstructor;
public UShortEnum UShortEnumConstructor;
public IntEnum IntEnumConstructor;
public UIntEnum UIntEnumConstructor;
public LongEnum LongEnumConstructor;
public ULongEnum ULongEnumConstructor;
public char CharConstructor;
public bool BoolConstructor;
public float FloatConstructor;
public double DoubleConstructor;
public string StringConstructor;
public Type TypeConstructor;
public int[] ArrayConstructor;
public object ObjectConstructor;
public sbyte SByteProperty { get; set; }
public byte ByteProperty { get; set; }
public short ShortProperty { get; set; }
public ushort UShortProperty { get; set; }
public int IntProperty { get; set; }
public uint UIntProperty { get; set; }
public long LongProperty { get; set; }
public ulong ULongProperty { get; set; }
public SByteEnum SByteEnumProperty { get; set; }
public ByteEnum ByteEnumProperty { get; set; }
public ShortEnum ShortEnumProperty { get; set; }
public UShortEnum UShortEnumProperty { get; set; }
public IntEnum IntEnumProperty { get; set; }
public UIntEnum UIntEnumProperty { get; set; }
public LongEnum LongEnumProperty { get; set; }
public ULongEnum ULongEnumProperty { get; set; }
public char CharProperty { get; set; }
public bool BoolProperty { get; set; }
public float FloatProperty { get; set; }
public double DoubleProperty { get; set; }
public string StringProperty { get; set; }
public Type TypeProperty { get; set; }
public int[] ArrayProperty { get; set; }
public object ObjectProperty { get; set; }
public sbyte SByteField;
public byte ByteField;
public short ShortField;
public ushort UShortField;
public int IntField;
public uint UIntField;
public long LongField;
public ulong ULongField;
public SByteEnum SByteEnumField;
public ByteEnum ByteEnumField;
public ShortEnum ShortEnumField;
public UShortEnum UShortEnumField;
public IntEnum IntEnumField;
public UIntEnum UIntEnumField;
public LongEnum LongEnumField;
public ULongEnum ULongEnumField;
public char CharField;
public bool BoolField;
public float FloatField;
public double DoubleField;
public string StringField;
public Type TypeField;
public int[] ArrayField;
public object ObjectField;
}
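// Attribute with an indexer; indexers cannot be encoded as named properties in a custom attribute blob,
// so reading the attribute back is expected to fail with CustomAttributeFormatException
// (see IndexerInNamedProperties_ThrowsCustomAttributeFormatExceptionOnCreation).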
public class IndexerAttribute : Attribute
{
public IndexerAttribute() { }
public string this[string s]
{
get { return s; }
set { }
}
}
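// Enums covering every integral underlying type that is supported in attribute blobs.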
public enum SByteEnum : sbyte { }
public enum ByteEnum : byte { }
public enum ShortEnum : short { }
public enum UShortEnum : ushort { }
public enum IntEnum : int { }
public enum UIntEnum : uint { }
public enum LongEnum : long { }
public enum ULongEnum : ulong { }
}
// Licensed to the .NET Foundation under one or more agreements.
// The .NET Foundation licenses this file to you under the MIT license.
using System;
using System.Collections.Generic;
using System.IO;
using System.Linq;
using Xunit;
namespace System.Reflection.Emit.Tests
{
public class CustomAttributeBuilderTests
{
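// Each row supplies: constructor + constructor args, property names + values, the full set of expected
// property values, field names + values, and the full set of expected field values. The expected arrays
// follow the order of TestAttribute.AllProperties / TestAttribute.AllFields (defined alongside
// TestAttribute elsewhere in this test suite).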
public static IEnumerable<object[]> Ctor_TestData()
{
string stringValue1 = "TestString1";
string stringValue2 = "TestString2";
int intValue1 = 10;
int intValue2 = 20;
// 2 ctor, 0 properties, 1 field
yield return new object[]
{
typeof(TestAttribute).GetConstructor(new Type[] { typeof(string), typeof(int) }), new object[] { stringValue1, intValue1 },
new string[0], new object[0],
new object[] { intValue2, null, stringValue1, intValue1 },
new string[] { nameof(TestAttribute.TestInt) }, new object[] { intValue2 },
new object[] { intValue2, null, stringValue1, intValue1 }
};
// 2 ctor, 0 properties, 0 fields
yield return new object[]
{
typeof(TestAttribute).GetConstructor(new Type[] { typeof(string), typeof(int) }), new object[] { stringValue1, intValue1 },
new string[0], new object[0],
new object[] { 0, null, stringValue1, intValue1 },
new string[0], new object[0],
new object[] { 0, null, stringValue1, intValue1 }
};
// 0 ctor, 0 properties, 0 fields
yield return new object[]
{
typeof(TestAttribute).GetConstructor(new Type[0]), new object[0],
new string[0], new object[0],
new object[] { 0, null, null, 0 },
new string[0], new object[0],
new object[] { 0, null, null, 0 }
};
// 0 ctor, 0 properties, 1 field
yield return new object[]
{
typeof(TestAttribute).GetConstructor(new Type[0]), new object[0],
new string[0], new object[0],
new object[] { intValue1, null, null, 0 },
new string[] { nameof(TestAttribute.TestInt) }, new object[] { intValue1 },
new object[] { intValue1, null, null, 0 }
};
// 0 ctor, 0 properties, 2 fields
yield return new object[]
{
typeof(TestAttribute).GetConstructor(new Type[0]), new object[0],
new string[0], new object[0],
new object[] { intValue1, stringValue1, null, 0 },
new string[] { nameof(TestAttribute.TestInt), nameof(TestAttribute.TestStringField) }, new object[] { intValue1, stringValue1 },
new object[] { intValue1, stringValue1, null, 0 }
};
// 2 ctor, 0 properties, 2 fields
yield return new object[]
{
typeof(TestAttribute).GetConstructor(new Type[] { typeof(string), typeof(int) }), new object[] { stringValue1, intValue1 },
new string[0], new object[0],
new object[] { intValue2, stringValue2, stringValue1, intValue1 },
new string[] { nameof(TestAttribute.TestInt), nameof(TestAttribute.TestStringField) }, new object[] { intValue2, stringValue2 },
new object[] { intValue2, stringValue2, stringValue1, intValue1 }
};
// 0 ctor, 0 properties, 1 field
yield return new object[]
{
typeof(TestAttribute).GetConstructor(new Type[0]), new object[0],
new string[0], new object[0],
new object[] { 0, stringValue1, null, 0 },
new string[] { nameof(TestAttribute.TestStringField) }, new object[] { stringValue1 },
new object[] { 0, stringValue1, null, 0 }
};
// 2 ctor, 2 properties, 0 fields
yield return new object[]
{
typeof(TestAttribute).GetConstructor(new Type[] { typeof(string), typeof(int) }), new object[] { stringValue1, intValue1 },
new string[] { nameof(TestAttribute.TestInt32), nameof(TestAttribute.TestString) }, new object[] { intValue2, stringValue2 },
new object[] { intValue2, stringValue2, stringValue1, intValue1 },
new object[0], new object[0],
new object[] { intValue2, stringValue2, stringValue1, intValue1 }
};
// 2 ctor, 1 property, 0 fields
yield return new object[]
{
typeof(TestAttribute).GetConstructor(new Type[] { typeof(string), typeof(int) }), new object[] { stringValue1, intValue1 },
new string[] { nameof(TestAttribute.TestInt32) }, new object[] { intValue2 },
new object[] { intValue2, null, stringValue1, intValue1 },
new object[0], new object[0],
new object[] { intValue2, null, stringValue1, intValue1 }
};
// 0 ctor, 1 property, 0 fields
yield return new object[]
{
typeof(TestAttribute).GetConstructor(new Type[0]), new object[0],
new string[] { nameof(TestAttribute.TestInt32) }, new object[] { intValue2 },
new object[] { intValue2, null, null, 0 },
new object[0], new object[0],
new object[] { intValue2, null, null, 0 }
};
// 0 ctor, 2 properties, 0 fields
yield return new object[]
{
typeof(TestAttribute).GetConstructor(new Type[0]), new object[0],
new string[] { nameof(TestAttribute.TestInt32), nameof(TestAttribute.TestString) }, new object[] { intValue2, stringValue2 },
new object[] { intValue2, stringValue2, null, 0 },
new object[0], new object[0],
new object[] { intValue2, stringValue2, null, 0 }
};
// 4 ctor, 2 properties, 0 fields
yield return new object[]
{
typeof(TestAttribute).GetConstructor(new Type[] { typeof(string), typeof(int), typeof(string), typeof(int) }), new object[] { stringValue1, intValue1, stringValue1, intValue1 },
new string[] { nameof(TestAttribute.TestInt32), nameof(TestAttribute.TestString) }, new object[] { intValue2, stringValue2 },
new object[] { intValue2, stringValue2, stringValue1, intValue1 },
new string[0], new object[0],
new object[] { intValue2, stringValue2, stringValue1, intValue1 }
};
// 2 ctor, 2 properties, 2 fields
yield return new object[]
{
typeof(TestAttribute).GetConstructor(new Type[] { typeof(string), typeof(int) }), new object[] { stringValue1, intValue1 },
new string[] { nameof(TestAttribute.TestInt32), nameof(TestAttribute.TestString) }, new object[] { intValue2, stringValue2 },
new object[] { intValue2, stringValue2, stringValue1, intValue1 },
new string[] { nameof(TestAttribute.TestInt), nameof(TestAttribute.TestStringField) }, new object[] { intValue2, stringValue2 },
new object[] { intValue2, stringValue2, stringValue1, intValue1 }
};
// 2 ctor, 1 property, 1 field
yield return new object[]
{
typeof(TestAttribute).GetConstructor(new Type[] { typeof(string), typeof(int) }), new object[] { stringValue1, intValue1 },
new string[] { nameof(TestAttribute.TestString) }, new object[] { stringValue2 },
new object[] { intValue2, stringValue2, stringValue1, intValue1 },
new string[] { nameof(TestAttribute.TestInt) }, new object[] { intValue2 },
new object[] { intValue2, stringValue2, stringValue1, intValue1 }
};
// 0 ctor, 2 properties, 1 field
yield return new object[]
{
typeof(TestAttribute).GetConstructor(new Type[0]), new object[0],
new string[] { nameof(TestAttribute.TestInt32), nameof(TestAttribute.TestString) }, new object[] { intValue1, stringValue1 },
new object[] { intValue2, stringValue1, null, 0 },
new string[] { nameof(TestAttribute.TestInt) }, new object[] { intValue2 },
new object[] { intValue2, stringValue1, null, 0 }
};
// 2 ctor, 1 property, 0 fields
string shortString = new string('a', 128);
string longString = new string('a', 16384);
yield return new object[]
{
typeof(TestAttribute).GetConstructor(new Type[] { typeof(string), typeof(int) }), new object[] { shortString, intValue1 },
new string[] { nameof(TestAttribute.TestString) }, new object[] { longString },
new object[] { 0, longString, shortString, intValue1 },
new string[0], new object[0],
new object[] { 0, longString, shortString, intValue1 }
};
// 0 ctor, 1 property, 1 field
yield return new object[]
{
typeof(SubAttribute).GetConstructor(new Type[0]), new object[0],
new string[] { nameof(TestAttribute.TestString) }, new object[] { stringValue1 },
new object[] { intValue1, stringValue1, null, 0 },
new string[] { nameof(TestAttribute.TestInt) }, new object[] { intValue1 },
new object[] { intValue1, stringValue1, null, 0 }
};
}
[Theory]
[MemberData(nameof(Ctor_TestData))]
public static void Ctor(ConstructorInfo con, object[] constructorArgs,
string[] propertyNames, object[] propertyValues,
object[] expectedPropertyValues,
string[] fieldNames, object[] fieldValues,
object[] expectedFieldValues)
{
PropertyInfo[] namedProperties = Helpers.GetProperties(typeof(TestAttribute), propertyNames);
FieldInfo[] namedFields = Helpers.GetFields(typeof(TestAttribute), fieldNames);
void Verify(CustomAttributeBuilder attr)
{
VerifyCustomAttributeBuilder(attr, TestAttribute.AllProperties, expectedPropertyValues, TestAttribute.AllFields, expectedFieldValues);
}
if (namedProperties.Length == 0)
{
if (namedFields.Length == 0)
{
// Use CustomAttributeBuilder(ConstructorInfo, object[])
CustomAttributeBuilder attribute1 = new CustomAttributeBuilder(con, constructorArgs);
Verify(attribute1);
}
// Use CustomAttributeBuilder(ConstructorInfo, object[], FieldInfo[], object[])
CustomAttributeBuilder attribute2 = new CustomAttributeBuilder(con, constructorArgs, namedFields, fieldValues);
Verify(attribute2);
}
if (namedFields.Length == 0)
{
// Use CustomAttributeBuilder(ConstructorInfo, object[], PropertyInfo[], object[])
CustomAttributeBuilder attribute3 = new CustomAttributeBuilder(con, constructorArgs, namedProperties, propertyValues);
Verify(attribute3);
}
// Use CustomAttributeBuilder(ConstructorInfo, object[], PropertyInfo[], object[], FieldInfo[], object[])
CustomAttributeBuilder attribute4 = new CustomAttributeBuilder(con, constructorArgs, namedProperties, propertyValues, namedFields, fieldValues);
Verify(attribute4);
}
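// Applies the builder to a fresh dynamic assembly, reads the attribute back through reflection,
// and checks every named field and property against its expected value.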
private static void VerifyCustomAttributeBuilder(CustomAttributeBuilder builder,
PropertyInfo[] propertyNames, object[] propertyValues,
FieldInfo[] fieldNames, object[] fieldValues)
{
AssemblyBuilder assembly = Helpers.DynamicAssembly();
assembly.SetCustomAttribute(builder);
object[] customAttributes = assembly.GetCustomAttributes().ToArray();
Assert.Equal(1, customAttributes.Length);
object customAttribute = customAttributes[0];
for (int i = 0; i < fieldNames.Length; ++i)
{
FieldInfo field = typeof(TestAttribute).GetField(fieldNames[i].Name);
Assert.Equal(fieldValues[i], field.GetValue(customAttribute));
}
for (int i = 0; i < propertyNames.Length; ++i)
{
PropertyInfo property = typeof(TestAttribute).GetProperty(propertyNames[i].Name);
Assert.Equal(propertyValues[i], property.GetValue(customAttribute));
}
}
[Fact]
public static void Ctor_AllPrimitives()
{
ConstructorInfo con = typeof(Primitives).GetConstructors()[0];
object[] constructorArgs = new object[]
{
(sbyte)1, (byte)2, (short)3, (ushort)4, 5, (uint)6, (long)7, (ulong)8,
(SByteEnum)9, (ByteEnum)10, (ShortEnum)11, (UShortEnum)12, (IntEnum)13, (UIntEnum)14, (LongEnum)15, (ULongEnum)16,
(char)17, true, 2.0f, 2.1,
"abc", typeof(object), new int[] { 24, 25, 26 }, null
};
PropertyInfo[] namedProperties = Helpers.GetProperties(typeof(Primitives), new string[]
{
nameof(Primitives.SByteProperty), nameof(Primitives.ByteProperty), nameof(Primitives.ShortProperty), nameof(Primitives.UShortProperty), nameof(Primitives.IntProperty), nameof(Primitives.UIntProperty), nameof(Primitives.LongProperty), nameof(Primitives.ULongProperty),
nameof(Primitives.SByteEnumProperty), nameof(Primitives.ByteEnumProperty), nameof(Primitives.ShortEnumProperty), nameof(Primitives.UShortEnumProperty), nameof(Primitives.IntEnumProperty), nameof(Primitives.UIntEnumProperty), nameof(Primitives.LongEnumProperty), nameof(Primitives.ULongEnumProperty),
nameof(Primitives.CharProperty), nameof(Primitives.BoolProperty), nameof(Primitives.FloatProperty), nameof(Primitives.DoubleProperty),
nameof(Primitives.StringProperty), nameof(Primitives.TypeProperty), nameof(Primitives.ArrayProperty), nameof(Primitives.ObjectProperty)
});
object[] propertyValues = new object[]
{
(sbyte)27, (byte)28, (short)29, (ushort)30, 31, (uint)32, (long)33, (ulong)34,
(SByteEnum)35, (ByteEnum)36, (ShortEnum)37, (UShortEnum)38, (IntEnum)39, (UIntEnum)40, (LongEnum)41, (ULongEnum)42,
(char)43, false, 4.4f, 4.5,
"def", typeof(bool), new int[] { 48, 49, 50 }, "stringAsObject"
};
FieldInfo[] namedFields = Helpers.GetFields(typeof(Primitives), new string[]
{
nameof(Primitives.SByteField), nameof(Primitives.ByteField), nameof(Primitives.ShortField), nameof(Primitives.UShortField), nameof(Primitives.IntField), nameof(Primitives.UIntField), nameof(Primitives.LongField), nameof(Primitives.ULongField),
nameof(Primitives.SByteEnumField), nameof(Primitives.ByteEnumField), nameof(Primitives.ShortEnumField), nameof(Primitives.UShortEnumField), nameof(Primitives.IntEnumField), nameof(Primitives.UIntEnumField), nameof(Primitives.LongEnumField), nameof(Primitives.ULongEnumField),
nameof(Primitives.CharField), nameof(Primitives.BoolField), nameof(Primitives.FloatField), nameof(Primitives.DoubleField),
nameof(Primitives.StringField), nameof(Primitives.TypeField), nameof(Primitives.ArrayField), nameof(Primitives.ObjectField)
});
object[] fieldValues = new object[]
{
(sbyte)51, (byte)52, (short)53, (ushort)54, 55, (uint)56, (long)57, (ulong)58,
(SByteEnum)59, (ByteEnum)60, (ShortEnum)61, (UShortEnum)62, (IntEnum)63, (UIntEnum)64, (LongEnum)65, (ULongEnum)66,
(char)67, true, 6.8f, 6.9,
null, null, null, 70
};
CustomAttributeBuilder attributeBuilder = new CustomAttributeBuilder(con, constructorArgs, namedProperties, propertyValues, namedFields, fieldValues);
AssemblyBuilder assembly = Helpers.DynamicAssembly();
assembly.SetCustomAttribute(attributeBuilder);
object[] customAttributes = assembly.GetCustomAttributes().ToArray();
Assert.Equal(1, customAttributes.Length);
Primitives attribute = (Primitives)customAttributes[0];
// Constructor: primitives
Assert.Equal(constructorArgs[0], attribute.SByteConstructor);
Assert.Equal(constructorArgs[1], attribute.ByteConstructor);
Assert.Equal(constructorArgs[2], attribute.ShortConstructor);
Assert.Equal(constructorArgs[3], attribute.UShortConstructor);
Assert.Equal(constructorArgs[4], attribute.IntConstructor);
Assert.Equal(constructorArgs[5], attribute.UIntConstructor);
Assert.Equal(constructorArgs[6], attribute.LongConstructor);
Assert.Equal(constructorArgs[7], attribute.ULongConstructor);
// Constructors: enums
Assert.Equal(constructorArgs[8], attribute.SByteEnumConstructor);
Assert.Equal(constructorArgs[9], attribute.ByteEnumConstructor);
Assert.Equal(constructorArgs[10], attribute.ShortEnumConstructor);
Assert.Equal(constructorArgs[11], attribute.UShortEnumConstructor);
Assert.Equal(constructorArgs[12], attribute.IntEnumConstructor);
Assert.Equal(constructorArgs[13], attribute.UIntEnumConstructor);
Assert.Equal(constructorArgs[14], attribute.LongEnumConstructor);
Assert.Equal(constructorArgs[15], attribute.ULongEnumConstructor);
// Constructors: other primitives
Assert.Equal(constructorArgs[16], attribute.CharConstructor);
Assert.Equal(constructorArgs[17], attribute.BoolConstructor);
Assert.Equal(constructorArgs[18], attribute.FloatConstructor);
Assert.Equal(constructorArgs[19], attribute.DoubleConstructor);
// Constructors: misc
Assert.Equal(constructorArgs[20], attribute.StringConstructor);
Assert.Equal(constructorArgs[21], attribute.TypeConstructor);
Assert.Equal(constructorArgs[22], attribute.ArrayConstructor);
Assert.Equal(constructorArgs[23], attribute.ObjectConstructor);
// Field: primitives
Assert.Equal(fieldValues[0], attribute.SByteField);
Assert.Equal(fieldValues[1], attribute.ByteField);
Assert.Equal(fieldValues[2], attribute.ShortField);
Assert.Equal(fieldValues[3], attribute.UShortField);
Assert.Equal(fieldValues[4], attribute.IntField);
Assert.Equal(fieldValues[5], attribute.UIntField);
Assert.Equal(fieldValues[6], attribute.LongField);
Assert.Equal(fieldValues[7], attribute.ULongField);
// Fields: enums
Assert.Equal(fieldValues[8], attribute.SByteEnumField);
Assert.Equal(fieldValues[9], attribute.ByteEnumField);
Assert.Equal(fieldValues[10], attribute.ShortEnumField);
Assert.Equal(fieldValues[11], attribute.UShortEnumField);
Assert.Equal(fieldValues[12], attribute.IntEnumField);
Assert.Equal(fieldValues[13], attribute.UIntEnumField);
Assert.Equal(fieldValues[14], attribute.LongEnumField);
Assert.Equal(fieldValues[15], attribute.ULongEnumField);
// Fields: other primitives
Assert.Equal(fieldValues[16], attribute.CharField);
Assert.Equal(fieldValues[17], attribute.BoolField);
Assert.Equal(fieldValues[18], attribute.FloatField);
Assert.Equal(fieldValues[19], attribute.DoubleField);
// Fields: misc
Assert.Equal(fieldValues[20], attribute.StringField);
Assert.Equal(fieldValues[21], attribute.TypeField);
Assert.Equal(fieldValues[22], attribute.ArrayField);
Assert.Equal(fieldValues[23], attribute.ObjectField);
// Properties: primitives
Assert.Equal(propertyValues[0], attribute.SByteProperty);
Assert.Equal(propertyValues[1], attribute.ByteProperty);
Assert.Equal(propertyValues[2], attribute.ShortProperty);
Assert.Equal(propertyValues[3], attribute.UShortProperty);
Assert.Equal(propertyValues[4], attribute.IntProperty);
Assert.Equal(propertyValues[5], attribute.UIntProperty);
Assert.Equal(propertyValues[6], attribute.LongProperty);
Assert.Equal(propertyValues[7], attribute.ULongProperty);
// Properties: enums
Assert.Equal(propertyValues[8], attribute.SByteEnumProperty);
Assert.Equal(propertyValues[9], attribute.ByteEnumProperty);
Assert.Equal(propertyValues[10], attribute.ShortEnumProperty);
Assert.Equal(propertyValues[11], attribute.UShortEnumProperty);
Assert.Equal(propertyValues[12], attribute.IntEnumProperty);
Assert.Equal(propertyValues[13], attribute.UIntEnumProperty);
Assert.Equal(propertyValues[14], attribute.LongEnumProperty);
Assert.Equal(propertyValues[15], attribute.ULongEnumProperty);
// Properties: other primitives
Assert.Equal(propertyValues[16], attribute.CharProperty);
Assert.Equal(propertyValues[17], attribute.BoolProperty);
Assert.Equal(propertyValues[18], attribute.FloatProperty);
Assert.Equal(propertyValues[19], attribute.DoubleProperty);
// Properties: misc
Assert.Equal(propertyValues[20], attribute.StringProperty);
Assert.Equal(propertyValues[21], attribute.TypeProperty);
Assert.Equal(propertyValues[22], attribute.ArrayProperty);
Assert.Equal(propertyValues[23], attribute.ObjectProperty);
}
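// Builds a dynamic attribute type so the CustomAttributeBuilder overloads can be exercised with
// Reflection.Emit builder objects (ConstructorBuilder, PropertyBuilder, FieldBuilder) mixed with the
// corresponding completed ConstructorInfo/PropertyInfo/FieldInfo instances.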
public static IEnumerable<object[]> Ctor_RefEmitParameters_TestData()
{
AssemblyBuilder assemblyBuilder = Helpers.DynamicAssembly();
TypeBuilder typeBuilder = assemblyBuilder.DefineDynamicModule("DynamicModule").DefineType("DynamicType", TypeAttributes.Public, typeof(Attribute));
ConstructorBuilder constructorBuilder = typeBuilder.DefineConstructor(MethodAttributes.Public, CallingConventions.Standard, new Type[0]);
constructorBuilder.GetILGenerator().Emit(OpCodes.Ret);
FieldBuilder fieldBuilder = typeBuilder.DefineField("Field", typeof(int), FieldAttributes.Public);
FieldBuilder fieldBuilderProperty = typeBuilder.DefineField("PropertyField", typeof(int), FieldAttributes.Public);
PropertyBuilder propertyBuilder = typeBuilder.DefineProperty("Property", PropertyAttributes.None, typeof(int), new Type[0]);
MethodBuilder setMethod = typeBuilder.DefineMethod("set_Property", MethodAttributes.Public, typeof(void), new Type[] { typeof(int) });
ILGenerator setMethodGenerator = setMethod.GetILGenerator();
setMethodGenerator.Emit(OpCodes.Ldarg_0);
setMethodGenerator.Emit(OpCodes.Ldarg_1);
setMethodGenerator.Emit(OpCodes.Stfld, fieldBuilderProperty);
setMethodGenerator.Emit(OpCodes.Ret);
propertyBuilder.SetSetMethod(setMethod);
Type createdType = typeBuilder.CreateTypeInfo().AsType();
// ConstructorBuilder, PropertyInfo, FieldInfo
yield return new object[]
{
constructorBuilder, new object[0],
new PropertyInfo[] { createdType.GetProperty(propertyBuilder.Name) }, new object[] { 1 },
new FieldInfo[] { createdType.GetField(fieldBuilder.Name) }, new object[] { 2 }
};
// ConstructorInfo, PropertyBuilder, FieldBuilder
yield return new object[]
{
createdType.GetConstructor(new Type[0]), new object[0],
new PropertyInfo[] { propertyBuilder }, new object[] { 1 },
new FieldInfo[] { fieldBuilder }, new object[] { 2 }
};
// ConstructorBuilder, PropertyBuilder, FieldBuilder
yield return new object[]
{
constructorBuilder, new object[0],
new PropertyInfo[] { propertyBuilder }, new object[] { 1 },
new FieldInfo[] { fieldBuilder }, new object[] { 2 }
};
}
[Theory]
[ActiveIssue("https://github.com/dotnet/runtime/issues/2383", TestRuntimes.Mono)]
[MemberData(nameof(Ctor_RefEmitParameters_TestData))]
public static void Ctor_RefEmitParameters(ConstructorInfo con, object[] constructorArgs,
PropertyInfo[] namedProperties, object[] propertyValues,
FieldInfo[] namedFields, object[] fieldValues)
{
CustomAttributeBuilder attribute = new CustomAttributeBuilder(con, constructorArgs, namedProperties, propertyValues, namedFields, fieldValues);
AssemblyBuilder assembly = Helpers.DynamicAssembly();
assembly.SetCustomAttribute(attribute);
object createdAttribute = assembly.GetCustomAttributes().First();
Assert.Equal(propertyValues[0], createdAttribute.GetType().GetField("PropertyField").GetValue(createdAttribute));
Assert.Equal(fieldValues[0], createdAttribute.GetType().GetField("Field").GetValue(createdAttribute));
}
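// Readonly and static fields are accepted as named fields in the blob; when the attribute is read back,
// the value is applied to the instance field (or to the static field) and can be observed via reflection.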
[Theory]
[ActiveIssue("https://github.com/dotnet/runtime/issues/2383", TestRuntimes.Mono)]
[InlineData(nameof(TestAttribute.ReadonlyField))]
[InlineData(nameof(TestAttribute.StaticField))]
[InlineData(nameof(TestAttribute.StaticReadonlyField))]
public void NamedFields_ContainsReadonlyOrStaticField_Works(string name)
{
ConstructorInfo con = typeof(TestAttribute).GetConstructor(new Type[0]);
FieldInfo[] namedFields = new FieldInfo[] { typeof(TestAttribute).GetField(name) };
object[] fieldValues = new object[] { 5 };
CustomAttributeBuilder attribute = new CustomAttributeBuilder(con, new object[0], namedFields, fieldValues);
AssemblyBuilder assembly = Helpers.DynamicAssembly();
assembly.SetCustomAttribute(attribute);
object customAttribute = assembly.GetCustomAttributes().First();
Assert.Equal(fieldValues[0], namedFields[0].GetValue(namedFields[0].IsStatic ? null : customAttribute));
}
[Fact]
public void NamedProperties_StaticProperty_Works()
{
ConstructorInfo con = typeof(TestAttribute).GetConstructor(new Type[0]);
PropertyInfo[] namedProperties = new PropertyInfo[] { typeof(TestAttribute).GetProperty(nameof(TestAttribute.StaticProperty)) };
object[] propertyValues = new object[] { 5 };
CustomAttributeBuilder attribute = new CustomAttributeBuilder(con, new object[0], namedProperties, propertyValues);
AssemblyBuilder assembly = Helpers.DynamicAssembly();
assembly.SetCustomAttribute(attribute);
object customAttribute = assembly.GetCustomAttributes().First();
Assert.Equal(propertyValues[0], TestAttribute.StaticProperty);
}
[Theory]
[ActiveIssue("https://github.com/dotnet/runtime/issues/2383", TestRuntimes.Mono)]
[InlineData(typeof(PrivateAttribute))]
[InlineData(typeof(NotAnAttribute))]
public static void ClassNotSupportedAsAttribute_DoesNotThrow_DoesNotSet(Type type)
{
ConstructorInfo con = type.GetConstructor(new Type[0]);
CustomAttributeBuilder attribute = new CustomAttributeBuilder(con, new object[0]);
AssemblyBuilder assembly = Helpers.DynamicAssembly();
assembly.SetCustomAttribute(attribute);
Assert.Empty(assembly.GetCustomAttributes());
}
[Fact]
public static void NullConstructor_ThrowsArgumentNullException()
{
AssertExtensions.Throws<ArgumentNullException>("con", () => new CustomAttributeBuilder(null, new object[0]));
AssertExtensions.Throws<ArgumentNullException>("con", () => new CustomAttributeBuilder(null, new object[0], new FieldInfo[0], new object[0]));
AssertExtensions.Throws<ArgumentNullException>("con", () => new CustomAttributeBuilder(null, new object[0], new PropertyInfo[0], new object[0]));
AssertExtensions.Throws<ArgumentNullException>("con", () => new CustomAttributeBuilder(null, new object[0], new PropertyInfo[0], new object[0], new FieldInfo[0], new object[0]));
}
[Fact]
public static void StaticConstructor_ThrowsArgumentException()
{
ConstructorInfo con = typeof(TestAttribute).GetConstructors(BindingFlags.Public | BindingFlags.NonPublic | BindingFlags.Static).First();
AssertExtensions.Throws<ArgumentException>(null, () => new CustomAttributeBuilder(con, new object[0]));
AssertExtensions.Throws<ArgumentException>(null, () => new CustomAttributeBuilder(con, new object[0], new FieldInfo[0], new object[0]));
AssertExtensions.Throws<ArgumentException>(null, () => new CustomAttributeBuilder(con, new object[0], new PropertyInfo[0], new object[0]));
AssertExtensions.Throws<ArgumentException>(null, () => new CustomAttributeBuilder(con, new object[0], new PropertyInfo[0], new object[0], new FieldInfo[0], new object[0]));
}
[Fact]
public static void PrivateConstructor_ThrowsArgumentException()
{
ConstructorInfo con = typeof(TestAttribute).GetConstructors(BindingFlags.NonPublic | BindingFlags.Instance).First();
AssertExtensions.Throws<ArgumentException>(null, () => new CustomAttributeBuilder(con, new object[0]));
AssertExtensions.Throws<ArgumentException>(null, () => new CustomAttributeBuilder(con, new object[0], new FieldInfo[0], new object[0]));
AssertExtensions.Throws<ArgumentException>(null, () => new CustomAttributeBuilder(con, new object[0], new PropertyInfo[0], new object[0]));
AssertExtensions.Throws<ArgumentException>(null, () => new CustomAttributeBuilder(con, new object[0], new PropertyInfo[0], new object[0], new FieldInfo[0], new object[0]));
}
[Theory]
[ActiveIssue("https://github.com/dotnet/runtime/issues/2383", TestRuntimes.Mono)]
[InlineData(CallingConventions.Any)]
[InlineData(CallingConventions.VarArgs)]
public static void ConstructorHasNonStandardCallingConvention_ThrowsArgumentException(CallingConventions callingConvention)
{
TypeBuilder typeBuilder = Helpers.DynamicType(TypeAttributes.Public);
ConstructorBuilder constructorBuilder = typeBuilder.DefineConstructor(MethodAttributes.Public, callingConvention, new Type[0]);
constructorBuilder.GetILGenerator().Emit(OpCodes.Ret);
ConstructorInfo con = typeBuilder.CreateTypeInfo().AsType().GetConstructor(new Type[0]);
AssertExtensions.Throws<ArgumentException>(null, () => new CustomAttributeBuilder(con, new object[0]));
AssertExtensions.Throws<ArgumentException>(null, () => new CustomAttributeBuilder(con, new object[0], new FieldInfo[0], new object[0]));
AssertExtensions.Throws<ArgumentException>(null, () => new CustomAttributeBuilder(con, new object[0], new PropertyInfo[0], new object[0]));
AssertExtensions.Throws<ArgumentException>(null, () => new CustomAttributeBuilder(con, new object[0], new PropertyInfo[0], new object[0], new FieldInfo[0], new object[0]));
}
[Fact]
public static void NullConstructorArgs_ThrowsArgumentNullException()
{
ConstructorInfo con = typeof(TestAttribute).GetConstructor(new Type[] { typeof(int) });
AssertExtensions.Throws<ArgumentNullException>("constructorArgs", () => new CustomAttributeBuilder(con, null));
AssertExtensions.Throws<ArgumentNullException>("constructorArgs", () => new CustomAttributeBuilder(con, null, new FieldInfo[0], new object[0]));
AssertExtensions.Throws<ArgumentNullException>("constructorArgs", () => new CustomAttributeBuilder(con, null, new PropertyInfo[0], new object[0]));
AssertExtensions.Throws<ArgumentNullException>("constructorArgs", () => new CustomAttributeBuilder(con, null, new PropertyInfo[0], new object[0], new FieldInfo[0], new object[0]));
}
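// Values that cannot be encoded in a custom attribute blob: multidimensional arrays, Guid values, and
// enum values whose underlying type is not an integral primitive (char, bool, float, double).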
public static IEnumerable<object[]> NotSupportedObject_Constructor_TestData()
{
yield return new object[] { new int[0, 0] };
yield return new object[] { Enum.GetValues(CreateEnum(typeof(char), 'a')).GetValue(0) };
yield return new object[] { Enum.GetValues(CreateEnum(typeof(bool), true)).GetValue(0) };
}
public static IEnumerable<object[]> FloatEnum_DoubleEnum_TestData()
{
yield return new object[] { Enum.GetValues(CreateEnum(typeof(float), 0.0f)).GetValue(0) };
yield return new object[] { Enum.GetValues(CreateEnum(typeof(double), 0.0)).GetValue(0) };
}
public static IEnumerable<object[]> NotSupportedObject_Others_TestData()
{
yield return new object[] { new Guid() };
yield return new object[] { new int[5, 5] };
}
[Theory]
[SkipOnTargetFramework(TargetFrameworkMonikers.NetFramework, "Netfx doesn't support Enum.GetEnumName for float or double enums.")]
[MemberData(nameof(FloatEnum_DoubleEnum_TestData))]
public void ConstructorArgsContainsFloatEnumOrDoubleEnum_ThrowsArgumentException(object value)
{
NotSupportedObjectInConstructorArgs_ThrowsArgumentException(value);
}
[Theory]
[MemberData(nameof(NotSupportedObject_Constructor_TestData))]
[MemberData(nameof(NotSupportedObject_Others_TestData))]
public static void NotSupportedObjectInConstructorArgs_ThrowsArgumentException(object value)
{
ConstructorInfo con = typeof(TestAttribute).GetConstructor(new Type[] { typeof(object) });
object[] constructorArgs = new object[] { value };
AssertExtensions.Throws<ArgumentException>(null, () => new CustomAttributeBuilder(con, constructorArgs));
AssertExtensions.Throws<ArgumentException>(null, () => new CustomAttributeBuilder(con, constructorArgs, new PropertyInfo[0], new object[0]));
AssertExtensions.Throws<ArgumentException>(null, () => new CustomAttributeBuilder(con, constructorArgs, new FieldInfo[0], new object[0]));
AssertExtensions.Throws<ArgumentException>(null, () => new CustomAttributeBuilder(con, constructorArgs, new PropertyInfo[0], new object[0], new FieldInfo[0], new object[0]));
}
[Theory]
[InlineData(new Type[] { typeof(int) }, new object[] { 123, false })]
[InlineData(new Type[] { typeof(int), typeof(bool) }, new object[] { false, 123 })]
[InlineData(new Type[] { typeof(string), typeof(int), typeof(string), typeof(int) }, new object[] { "TestString", 10 })]
public void ConstructorAndConstructorArgsDontMatch_ThrowsArgumentException(Type[] constructorTypes, object[] constructorArgs)
{
ConstructorInfo con = typeof(TestAttribute).GetConstructor(constructorTypes);
AssertExtensions.Throws<ArgumentException>(null, () => new CustomAttributeBuilder(con, constructorArgs));
AssertExtensions.Throws<ArgumentException>(null, () => new CustomAttributeBuilder(con, constructorArgs, new FieldInfo[0], new object[0]));
AssertExtensions.Throws<ArgumentException>(null, () => new CustomAttributeBuilder(con, constructorArgs, new PropertyInfo[0], new object[0]));
AssertExtensions.Throws<ArgumentException>(null, () => new CustomAttributeBuilder(con, constructorArgs, new PropertyInfo[0], new object[0], new FieldInfo[0], new object[0]));
}
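// Parameter, field, and property types that are not legal in an attribute signature: IntPtr/UIntPtr, Guid,
// multidimensional arrays, and enums whose underlying type is char, bool, float, double, IntPtr, or UIntPtr.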
public static IEnumerable<object[]> IntPtrAttributeTypes_TestData()
{
yield return new object[] { typeof(IntPtr), (IntPtr)1 };
yield return new object[] { typeof(UIntPtr), (UIntPtr)1 };
}
public static IEnumerable<object[]> InvalidAttributeTypes_TestData()
{
yield return new object[] { typeof(Guid), new Guid() };
yield return new object[] { typeof(int[,]), new int[5, 5] };
yield return new object[] { CreateEnum(typeof(char), 'a'), 'a' };
yield return new object[] { CreateEnum(typeof(bool), false), true };
yield return new object[] { CreateEnum(typeof(float), 1.0f), 1.0f };
yield return new object[] { CreateEnum(typeof(double), 1.0), 1.0 };
yield return new object[] { CreateEnum(typeof(IntPtr)), (IntPtr)1 };
yield return new object[] { CreateEnum(typeof(UIntPtr)), (UIntPtr)1 };
}
[Theory]
[ActiveIssue("https://github.com/dotnet/runtime/issues/2383", TestRuntimes.Mono)]
[SkipOnTargetFramework(TargetFrameworkMonikers.NetFramework, "Coreclr fixed an issue where IntPtr/UIntPtr in constructorParameters causes a corrupt created binary.")]
[MemberData(nameof(IntPtrAttributeTypes_TestData))]
public void ConstructorParametersContainsIntPtrOrUIntPtrArgument_ThrowsArgumentException(Type type, object value)
{
ConstructorParametersNotSupportedInAttributes_ThrowsArgumentException(type, value);
}
[Theory]
[ActiveIssue("https://github.com/dotnet/runtime/issues/2383", TestRuntimes.Mono)]
[MemberData(nameof(InvalidAttributeTypes_TestData))]
public void ConstructorParametersNotSupportedInAttributes_ThrowsArgumentException(Type type, object value)
{
TypeBuilder typeBuilder = Helpers.DynamicType(TypeAttributes.Public);
ConstructorInfo con = typeBuilder.DefineConstructor(MethodAttributes.Public, CallingConventions.Standard, new Type[] { type });
object[] constructorArgs = new object[] { value };
AssertExtensions.Throws<ArgumentException>(null, () => new CustomAttributeBuilder(con, constructorArgs));
AssertExtensions.Throws<ArgumentException>(null, () => new CustomAttributeBuilder(con, constructorArgs, new FieldInfo[0], new object[0]));
AssertExtensions.Throws<ArgumentException>(null, () => new CustomAttributeBuilder(con, constructorArgs, new PropertyInfo[0], new object[0]));
AssertExtensions.Throws<ArgumentException>(null, () => new CustomAttributeBuilder(con, constructorArgs, new PropertyInfo[0], new object[0], new FieldInfo[0], new object[0]));
}
[Fact]
[ActiveIssue("https://github.com/dotnet/runtime/issues/2383", TestRuntimes.Mono)]
[SkipOnTargetFramework(TargetFrameworkMonikers.NetFramework, "Used to throw a NullReferenceException, see https://github.com/dotnet/runtime/issues/18552.")]
public void NullValueForPrimitiveTypeInConstructorArgs_ThrowsArgumentNullException()
{
ConstructorInfo con = typeof(TestAttribute).GetConstructor(new Type[] { typeof(int) });
object[] constructorArgs = new object[] { null };
AssertExtensions.Throws<ArgumentNullException>("constructorArgs[0]", () => new CustomAttributeBuilder(con, constructorArgs));
AssertExtensions.Throws<ArgumentNullException>("constructorArgs[0]", () => new CustomAttributeBuilder(con, constructorArgs, new FieldInfo[0], new object[0]));
AssertExtensions.Throws<ArgumentNullException>("constructorArgs[0]", () => new CustomAttributeBuilder(con, constructorArgs, new PropertyInfo[0], new object[0]));
AssertExtensions.Throws<ArgumentNullException>("constructorArgs[0]", () => new CustomAttributeBuilder(con, constructorArgs, new PropertyInfo[0], new object[0], new FieldInfo[0], new object[0]));
}
public static IEnumerable<object[]> NotSupportedPrimitives_TestData()
{
yield return new object[] { (IntPtr)1 };
yield return new object[] { (UIntPtr)1 };
}
[Theory]
[ActiveIssue("https://github.com/dotnet/runtime/issues/2383", TestRuntimes.Mono)]
[SkipOnTargetFramework(TargetFrameworkMonikers.NetFramework, "Coreclr fixed an issue where IntPtr/UIntPtr in constructorArgs causes a corrupt created binary.")]
[MemberData(nameof(NotSupportedPrimitives_TestData))]
public static void NotSupportedPrimitiveInConstructorArgs_ThrowsArgumentException(object value)
{
ConstructorInfo con = typeof(TestAttribute).GetConstructor(new Type[] { typeof(object) });
object[] constructorArgs = new object[] { value };
AssertExtensions.Throws<ArgumentException>("constructorArgs[0]", () => new CustomAttributeBuilder(con, constructorArgs));
AssertExtensions.Throws<ArgumentException>("constructorArgs[0]", () => new CustomAttributeBuilder(con, constructorArgs, new FieldInfo[0], new object[0]));
AssertExtensions.Throws<ArgumentException>("constructorArgs[0]", () => new CustomAttributeBuilder(con, constructorArgs, new PropertyInfo[0], new object[0]));
AssertExtensions.Throws<ArgumentException>("constructorArgs[0]", () => new CustomAttributeBuilder(con, constructorArgs, new PropertyInfo[0], new object[0], new FieldInfo[0], new object[0]));
}
[Fact]
[ActiveIssue("https://github.com/dotnet/runtime/issues/2383", TestRuntimes.Mono)]
public static void DynamicTypeInConstructorArgs_ThrowsFileNotFoundExceptionOnCreation()
{
AssemblyBuilder assembly = Helpers.DynamicAssembly();
TypeBuilder type = assembly.DefineDynamicModule("DynamicModule").DefineType("DynamicType");
ConstructorInfo con = typeof(TestAttribute).GetConstructor(new Type[] { typeof(object) });
object[] constructorArgs = new object[] { type };
CustomAttributeBuilder attribute = new CustomAttributeBuilder(con, constructorArgs);
assembly.SetCustomAttribute(attribute);
Assert.Throws<FileNotFoundException>(() => assembly.GetCustomAttributes());
}
[Fact]
public static void NullNamedFields_ThrowsArgumentNullException()
{
ConstructorInfo con = typeof(TestAttribute).GetConstructor(new Type[0]);
AssertExtensions.Throws<ArgumentNullException>("namedFields", () => new CustomAttributeBuilder(con, new object[0], (FieldInfo[])null, new object[0]));
AssertExtensions.Throws<ArgumentNullException>("namedFields", () => new CustomAttributeBuilder(con, new object[0], new PropertyInfo[0], new object[0], null, new object[0]));
}
[Theory]
[MemberData(nameof(InvalidAttributeTypes_TestData))]
public void NamedFields_FieldTypeNotSupportedInAttributes_ThrowsArgumentException(Type type, object value)
{
TypeBuilder typeBuilder = Helpers.DynamicType(TypeAttributes.Public);
FieldInfo field = typeBuilder.DefineField("Field", type, FieldAttributes.Public);
ConstructorInfo con = typeof(TestAttribute).GetConstructor(new Type[0]);
FieldInfo[] namedFields = new FieldInfo[] { field };
object[] fieldValues = new object[] { value };
AssertExtensions.Throws<ArgumentException>(null, () => new CustomAttributeBuilder(con, new object[0], namedFields, fieldValues));
AssertExtensions.Throws<ArgumentException>(null, () => new CustomAttributeBuilder(con, new object[0], new PropertyInfo[0], new object[0], namedFields, fieldValues));
}
public static IEnumerable<object[]> FieldDoesntBelongToConstructorDeclaringType_TestData()
{
// Different declaring type
yield return new object[] { typeof(TestAttribute).GetConstructor(new Type[0]), typeof(OtherTestAttribute).GetField(nameof(OtherTestAttribute.Field)) };
// Base class and sub class declaring types
yield return new object[] { typeof(TestAttribute).GetConstructor(new Type[0]), typeof(SubAttribute).GetField(nameof(SubAttribute.SubField)) };
}
[Theory]
[ActiveIssue("https://github.com/dotnet/runtime/issues/2383", TestRuntimes.Mono)]
[MemberData(nameof(FieldDoesntBelongToConstructorDeclaringType_TestData))]
public void NamedFields_FieldDoesntBelongToConstructorDeclaringType_ThrowsArgumentException(ConstructorInfo con, FieldInfo field)
{
FieldInfo[] namedFields = new FieldInfo[] { field };
AssertExtensions.Throws<ArgumentException>(null, () => new CustomAttributeBuilder(con, new object[0], namedFields, new object[] { 5 }));
AssertExtensions.Throws<ArgumentException>(null, () => new CustomAttributeBuilder(con, new object[0], new PropertyInfo[0], new object[0], namedFields, new object[] { 5 }));
}
[Fact]
[ActiveIssue("https://github.com/dotnet/runtime/issues/2383", TestRuntimes.Mono)]
public void NamedFields_ContainsConstField_ThrowsArgumentException()
{
ConstructorInfo con = typeof(TestAttribute).GetConstructor(new Type[0]);
FieldInfo[] namedFields = new FieldInfo[] { typeof(TestAttribute).GetField(nameof(TestAttribute.ConstField)) };
object[] fieldValues = new object[] { 5 };
CustomAttributeBuilder attribute = new CustomAttributeBuilder(con, new object[0], namedFields, fieldValues);
AssemblyBuilder assembly = Helpers.DynamicAssembly();
assembly.SetCustomAttribute(attribute);
// CustomAttributeFormatException is not exposed on .NET Core
Exception ex = Assert.ThrowsAny<Exception>(() => assembly.GetCustomAttributes());
Assert.Equal("System.Reflection.CustomAttributeFormatException", ex.GetType().ToString());
}
[Fact]
public static void NullFieldValues_ThrowsArgumentNullException()
{
ConstructorInfo con = typeof(TestAttribute).GetConstructor(new Type[0]);
AssertExtensions.Throws<ArgumentNullException>("fieldValues", () => new CustomAttributeBuilder(con, new object[0], new FieldInfo[0], null));
AssertExtensions.Throws<ArgumentNullException>("fieldValues", () => new CustomAttributeBuilder(con, new object[0], new PropertyInfo[0], new object[0], new FieldInfo[0], null));
}
[Fact]
[ActiveIssue("https://github.com/dotnet/runtime/issues/2383", TestRuntimes.Mono)]
public static void NullObjectInNamedFields_ThrowsArgumentNullException()
{
ConstructorInfo con = typeof(TestAttribute).GetConstructor(new Type[0]);
FieldInfo[] namedFields = new FieldInfo[] { null };
AssertExtensions.Throws<ArgumentNullException>("namedFields[0]", () => new CustomAttributeBuilder(con, new object[0], namedFields, new object[1]));
AssertExtensions.Throws<ArgumentNullException>("namedFields[0]", () => new CustomAttributeBuilder(con, new object[0], new PropertyInfo[0], new object[0], namedFields, new object[1]));
}
[Fact]
[ActiveIssue("https://github.com/dotnet/runtime/issues/2383", TestRuntimes.Mono)]
public static void NullObjectInFieldValues_ThrowsArgumentNullException()
{
ConstructorInfo con = typeof(TestAttribute).GetConstructor(new Type[0]);
FieldInfo[] namedFields = Helpers.GetFields(typeof(TestAttribute), nameof(TestAttribute.TestInt));
object[] fieldValues = new object[] { null };
AssertExtensions.Throws<ArgumentNullException>("fieldValues[0]", () => new CustomAttributeBuilder(con, new object[0], namedFields, fieldValues));
AssertExtensions.Throws<ArgumentNullException>("fieldValues[0]", () => new CustomAttributeBuilder(con, new object[0], new PropertyInfo[0], new object[0], namedFields, fieldValues));
}
[Theory]
[ActiveIssue("https://github.com/dotnet/runtime/issues/2383", TestRuntimes.Mono)]
[MemberData(nameof(NotSupportedObject_Others_TestData))]
public static void NotSupportedObjectInFieldValues_ThrowsArgumentException(object value)
{
ConstructorInfo con = typeof(TestAttribute).GetConstructor(new Type[0]);
FieldInfo[] namedFields = Helpers.GetFields(typeof(TestAttribute), nameof(TestAttribute.ObjectField));
object[] fieldValues = new object[] { value };
AssertExtensions.Throws<ArgumentException>(null, () => new CustomAttributeBuilder(con, new object[0], namedFields, fieldValues));
AssertExtensions.Throws<ArgumentException>(null, () => new CustomAttributeBuilder(con, new object[0], new PropertyInfo[0], new object[0], namedFields, fieldValues));
}
[Fact]
[ActiveIssue("https://github.com/dotnet/runtime/issues/2383", TestRuntimes.Mono)]
public static void ZeroCountMultidimensionalArrayInFieldValues_ChangesToZeroCountJaggedArray()
{
ConstructorInfo con = typeof(TestAttribute).GetConstructor(new Type[0]);
FieldInfo[] namedFields = Helpers.GetFields(typeof(TestAttribute), nameof(TestAttribute.ObjectField));
object[] fieldValues = new object[] { new int[0, 0] };
CustomAttributeBuilder attribute = new CustomAttributeBuilder(con, new object[0], namedFields, fieldValues);
AssemblyBuilder assembly = Helpers.DynamicAssembly();
assembly.SetCustomAttribute(attribute);
TestAttribute customAttribute = (TestAttribute)assembly.GetCustomAttributes().First();
Array objectField = (Array)customAttribute.ObjectField;
Assert.IsType<int[]>(objectField);
Assert.Equal(0, objectField.Length);
}
[Theory]
[ActiveIssue("https://github.com/dotnet/runtime/issues/2383", TestRuntimes.Mono)]
[MemberData(nameof(NotSupportedPrimitives_TestData))]
[SkipOnTargetFramework(TargetFrameworkMonikers.NetFramework, "Coreclr fixed an issue where IntPtr/UIntPtr in fieldValues causes a corrupt created binary.")]
public static void NotSupportedPrimitiveInFieldValues_ThrowsArgumentException(object value)
{
// CustomAttributeBuilder.EmitType() used to hit an assert and write no CustomAttributeEncoding at all,
// producing a blob that (most likely) surfaces as a CustomAttributeFormatException. In theory the failure
// could have been something less controlled, so it was fixed. See https://github.com/dotnet/runtime/issues/18553.
ConstructorInfo con = typeof(TestAttribute).GetConstructor(new Type[0]);
FieldInfo[] namedFields = Helpers.GetFields(typeof(TestAttribute), nameof(TestAttribute.ObjectField));
object[] fieldValues = new object[] { value };
AssertExtensions.Throws<ArgumentException>("fieldValues[0]", () => new CustomAttributeBuilder(con, new object[0], namedFields, fieldValues));
AssertExtensions.Throws<ArgumentException>("fieldValues[0]", () => new CustomAttributeBuilder(con, new object[0], new PropertyInfo[0], new FieldInfo[0], namedFields, fieldValues));
}
[Fact]
[ActiveIssue("https://github.com/dotnet/runtime/issues/2383", TestRuntimes.Mono)]
        public static void DynamicTypeInFieldValues_ThrowsFileNotFoundExceptionOnCreation()
{
AssemblyBuilder assembly = Helpers.DynamicAssembly();
TypeBuilder type = assembly.DefineDynamicModule("DynamicModule").DefineType("DynamicType");
FieldInfo[] namedFields = Helpers.GetFields(typeof(TestAttribute), nameof(TestAttribute.ObjectField));
object[] fieldValues = new object[] { type };
ConstructorInfo con = typeof(TestAttribute).GetConstructor(new Type[0]);
CustomAttributeBuilder attribute = new CustomAttributeBuilder(con, new object[0], namedFields, fieldValues);
assembly.SetCustomAttribute(attribute);
Assert.Throws<FileNotFoundException>(() => assembly.GetCustomAttributes());
}
[Theory]
[InlineData(new string[] { nameof(TestAttribute.TestInt) }, new object[0], "namedFields, fieldValues")]
[InlineData(new string[] { nameof(TestAttribute.TestInt) }, new object[] { "TestString", 10 }, "namedFields, fieldValues")]
[InlineData(new string[] { nameof(TestAttribute.TestInt), nameof(TestAttribute.TestStringField) }, new object[] { "TestString", 10 }, null)]
[InlineData(new string[] { nameof(TestAttribute.TestStringField) }, new object[] { 10 }, null)]
public void NamedFieldAndFieldValuesDifferentLengths_ThrowsArgumentException(string[] fieldNames, object[] fieldValues, string paramName)
{
ConstructorInfo con = typeof(TestAttribute).GetConstructor(new Type[0]);
FieldInfo[] namedFields = Helpers.GetFields(typeof(TestAttribute), fieldNames);
AssertExtensions.Throws<ArgumentException>(paramName, () => new CustomAttributeBuilder(con, new object[0], namedFields, fieldValues));
AssertExtensions.Throws<ArgumentException>(paramName, () => new CustomAttributeBuilder(con, new object[0], new PropertyInfo[0], new object[0], namedFields, fieldValues));
}
[Fact]
public static void NullNamedProperties_ThrowsArgumentNullException()
{
ConstructorInfo con = typeof(TestAttribute).GetConstructor(new Type[0]);
AssertExtensions.Throws<ArgumentNullException>("namedProperties", () => new CustomAttributeBuilder(con, new object[0], (PropertyInfo[])null, new object[0]));
AssertExtensions.Throws<ArgumentNullException>("namedProperties", () => new CustomAttributeBuilder(con, new object[0], null, new object[0], new FieldInfo[0], new object[0]));
}
[Fact]
public static void NullPropertyValues_ThrowsArgumentNullException()
{
ConstructorInfo con = typeof(TestAttribute).GetConstructor(new Type[0]);
AssertExtensions.Throws<ArgumentNullException>("propertyValues", () => new CustomAttributeBuilder(con, new object[0], new PropertyInfo[0], null));
AssertExtensions.Throws<ArgumentNullException>("propertyValues", () => new CustomAttributeBuilder(con, new object[0], new PropertyInfo[0], null, new FieldInfo[0], new object[0]));
}
[Fact]
[ActiveIssue("https://github.com/dotnet/runtime/issues/2383", TestRuntimes.Mono)]
public static void NullObjectInNamedProperties_ThrowsArgumentNullException()
{
ConstructorInfo con = typeof(TestAttribute).GetConstructor(new Type[0]);
PropertyInfo[] namedProperties = new PropertyInfo[] { null };
AssertExtensions.Throws<ArgumentNullException>("namedProperties[0]", () => new CustomAttributeBuilder(con, new object[0], namedProperties, new object[1]));
AssertExtensions.Throws<ArgumentNullException>("namedProperties[0]", () => new CustomAttributeBuilder(con, new object[0], namedProperties, new object[1], new FieldInfo[0], new object[0]));
}
[Fact]
[ActiveIssue("https://github.com/dotnet/runtime/issues/2383", TestRuntimes.Mono)]
public static void IndexerInNamedProperties_ThrowsCustomAttributeFormatExceptionOnCreation()
{
ConstructorInfo con = typeof(IndexerAttribute).GetConstructor(new Type[0]);
PropertyInfo[] namedProperties = new PropertyInfo[] { typeof(IndexerAttribute).GetProperty("Item") };
CustomAttributeBuilder attribute = new CustomAttributeBuilder(con, new object[0], namedProperties, new object[] { "abc" });
AssemblyBuilder assembly = Helpers.DynamicAssembly();
assembly.SetCustomAttribute(attribute);
// CustomAttributeFormatException is not exposed on .NET Core
Exception ex = Assert.ThrowsAny<Exception>(() => assembly.GetCustomAttributes());
Assert.Equal("System.Reflection.CustomAttributeFormatException", ex.GetType().ToString());
}
[Theory]
[MemberData(nameof(InvalidAttributeTypes_TestData))]
[MemberData(nameof(IntPtrAttributeTypes_TestData))]
public void NamedProperties_TypeNotSupportedInAttributes_ThrowsArgumentException(Type type, object value)
{
TypeBuilder typeBuilder = Helpers.DynamicType(TypeAttributes.Public);
PropertyBuilder property = typeBuilder.DefineProperty("Property", PropertyAttributes.None, type, new Type[0]);
ConstructorInfo con = typeof(TestAttribute).GetConstructor(new Type[0]);
PropertyInfo[] namedProperties = new PropertyInfo[] { property };
object[] propertyValues = new object[] { value };
AssertExtensions.Throws<ArgumentException>(null, () => new CustomAttributeBuilder(con, new object[0], namedProperties, propertyValues));
AssertExtensions.Throws<ArgumentException>(null, () => new CustomAttributeBuilder(con, new object[0], namedProperties, propertyValues, new FieldInfo[0], new object[0]));
}
public static IEnumerable<object[]> PropertyDoesntBelongToConstructorDeclaringType_TestData()
{
// Different declaring type
yield return new object[] { typeof(TestAttribute).GetConstructor(new Type[0]), typeof(OtherTestAttribute).GetProperty(nameof(OtherTestAttribute.Property)) };
// Base class and sub class declaring types
yield return new object[] { typeof(TestAttribute).GetConstructor(new Type[0]), typeof(SubAttribute).GetProperty(nameof(SubAttribute.SubProperty)) };
}
[Theory]
[ActiveIssue("https://github.com/dotnet/runtime/issues/2383", TestRuntimes.Mono)]
[MemberData(nameof(PropertyDoesntBelongToConstructorDeclaringType_TestData))]
public void NamedProperties_PropertyDoesntBelongToConstructorDeclaringType_ThrowsArgumentException(ConstructorInfo con, PropertyInfo property)
{
PropertyInfo[] namedProperties = new PropertyInfo[] { property };
AssertExtensions.Throws<ArgumentException>(null, () => new CustomAttributeBuilder(con, new object[0], namedProperties, new object[] { 5 }));
AssertExtensions.Throws<ArgumentException>(null, () => new CustomAttributeBuilder(con, new object[0], namedProperties, new object[] { 5 }, new FieldInfo[0], new object[0]));
}
[Fact]
[ActiveIssue("https://github.com/dotnet/runtime/issues/2383", TestRuntimes.Mono)]
public static void NullObjectInPropertyValues_ThrowsArgumentNullException()
{
ConstructorInfo con = typeof(TestAttribute).GetConstructor(new Type[0]);
PropertyInfo[] namedProperties = Helpers.GetProperties(typeof(TestAttribute), nameof(TestAttribute.TestInt32));
object[] propertyValues = new object[] { null };
AssertExtensions.Throws<ArgumentNullException>("propertyValues[0]", () => new CustomAttributeBuilder(con, new object[0], namedProperties, propertyValues));
AssertExtensions.Throws<ArgumentNullException>("propertyValues[0]", () => new CustomAttributeBuilder(con, new object[0], namedProperties, propertyValues, new FieldInfo[0], new object[0]));
}
[Theory]
[ActiveIssue("https://github.com/dotnet/runtime/issues/2383", TestRuntimes.Mono)]
[MemberData(nameof(NotSupportedObject_Others_TestData))]
public static void NotSupportedObjectInPropertyValues_ThrowsArgumentException(object value)
{
ConstructorInfo con = typeof(TestAttribute).GetConstructor(new Type[0]);
PropertyInfo[] namedProperties = Helpers.GetProperties(typeof(TestAttribute), nameof(TestAttribute.ObjectProperty));
object[] propertyValues = new object[] { value };
AssertExtensions.Throws<ArgumentException>(null, () => new CustomAttributeBuilder(con, new object[0], namedProperties, propertyValues));
AssertExtensions.Throws<ArgumentException>(null, () => new CustomAttributeBuilder(con, new object[0], namedProperties, propertyValues, new FieldInfo[0], new object[0]));
}
[Fact]
[ActiveIssue("https://github.com/dotnet/runtime/issues/2383", TestRuntimes.Mono)]
public static void ZeroCountMultidimensionalArrayInPropertyValues_ChangesToZeroCountJaggedArray()
{
ConstructorInfo con = typeof(TestAttribute).GetConstructor(new Type[0]);
PropertyInfo[] namedProperties = Helpers.GetProperties(typeof(TestAttribute), nameof(TestAttribute.ObjectProperty));
object[] propertyValues = new object[] { new int[0, 0] };
CustomAttributeBuilder attribute = new CustomAttributeBuilder(con, new object[0], namedProperties, propertyValues);
AssemblyBuilder assembly = Helpers.DynamicAssembly();
assembly.SetCustomAttribute(attribute);
TestAttribute customAttribute = (TestAttribute)assembly.GetCustomAttributes().First();
Array objectProperty = (Array)customAttribute.ObjectProperty;
Assert.IsType<int[]>(objectProperty);
Assert.Equal(0, objectProperty.Length);
}
[Theory]
[ActiveIssue("https://github.com/dotnet/runtime/issues/2383", TestRuntimes.Mono)]
[MemberData(nameof(NotSupportedPrimitives_TestData))]
[SkipOnTargetFramework(TargetFrameworkMonikers.NetFramework, "Coreclr fixed an issue where IntPtr/UIntPtr in propertValues causes a corrupt created binary.")]
public static void NotSupportedPrimitiveInPropertyValues_ThrowsArgumentException(object value)
{
ConstructorInfo con = typeof(TestAttribute).GetConstructor(new Type[0]);
PropertyInfo[] namedProperties = Helpers.GetProperties(typeof(TestAttribute), nameof(TestAttribute.ObjectProperty));
object[] propertyValues = new object[] { value };
AssertExtensions.Throws<ArgumentException>("propertyValues[0]", () => new CustomAttributeBuilder(con, new object[0], namedProperties, propertyValues));
AssertExtensions.Throws<ArgumentException>("propertyValues[0]", () => new CustomAttributeBuilder(con, new object[0], namedProperties, propertyValues, new FieldInfo[0], new object[0]));
}
[Fact]
[ActiveIssue("https://github.com/dotnet/runtime/issues/2383", TestRuntimes.Mono)]
        public static void DynamicTypeInPropertyValues_ThrowsFileNotFoundExceptionOnCreation()
{
AssemblyBuilder assembly = Helpers.DynamicAssembly();
TypeBuilder type = assembly.DefineDynamicModule("DynamicModule").DefineType("DynamicType");
PropertyInfo[] namedProperties = Helpers.GetProperties(typeof(TestAttribute), nameof(TestAttribute.ObjectProperty));
object[] propertyValues = new object[] { type };
ConstructorInfo con = typeof(TestAttribute).GetConstructor(new Type[0]);
CustomAttributeBuilder attribute = new CustomAttributeBuilder(con, new object[0], namedProperties, propertyValues);
assembly.SetCustomAttribute(attribute);
Assert.Throws<FileNotFoundException>(() => assembly.GetCustomAttributes());
}
[Theory]
[InlineData(new string[] { nameof(TestAttribute.TestInt32) }, new object[0], "namedProperties, propertyValues")]
[InlineData(new string[0], new object[] { 10 }, "namedProperties, propertyValues")]
[InlineData(new string[] { nameof(TestAttribute.TestInt32), nameof(TestAttribute.TestString) }, new object[] { "TestString", 10 }, null)]
[InlineData(new string[] { nameof(TestAttribute.GetOnlyInt32) }, new object[] { "TestString" }, null)]
[InlineData(new string[] { nameof(TestAttribute.GetOnlyString) }, new object[] { "TestString" }, null)]
[InlineData(new string[] { nameof(TestAttribute.TestInt32) }, new object[] { "TestString" }, null)]
public void NamedPropertyAndPropertyValuesDifferentLengths_ThrowsArgumentException(string[] propertyNames, object[] propertyValues, string paramName)
{
ConstructorInfo con = typeof(TestAttribute).GetConstructor(new Type[0]);
PropertyInfo[] namedProperties = Helpers.GetProperties(typeof(TestAttribute), propertyNames);
AssertExtensions.Throws<ArgumentException>(paramName, () => new CustomAttributeBuilder(con, new object[0], namedProperties, propertyValues));
AssertExtensions.Throws<ArgumentException>(paramName, () => new CustomAttributeBuilder(con, new object[0], namedProperties, propertyValues, new FieldInfo[0], new object[0]));
}
private static Type CreateEnum(Type underlyingType, params object[] literalValues)
{
ModuleBuilder module = Helpers.DynamicModule();
EnumBuilder enumBuilder = module.DefineEnum("Name", TypeAttributes.Public, underlyingType);
for (int i = 0; i < (literalValues?.Length ?? 0); i++)
{
enumBuilder.DefineLiteral("Value" + i, literalValues[i]);
}
return enumBuilder.CreateTypeInfo().AsType();
}
}
public class OtherTestAttribute : Attribute
{
public int Property { get; set; }
public int Field;
}
class PrivateAttribute : Attribute { }
public class NotAnAttribute { }
public class Primitives : Attribute
{
public Primitives(sbyte sb, byte b, short s, ushort us, int i, uint ui, long l, ulong ul,
SByteEnum sbe, ByteEnum be, ShortEnum se, UShortEnum use, IntEnum ie, UIntEnum uie, LongEnum le, ULongEnum ule,
char c, bool bo, float f, double d,
string str, Type t, int[] arr, object obj)
{
SByteConstructor = sb;
ByteConstructor = b;
ShortConstructor = s;
UShortConstructor = us;
IntConstructor = i;
UIntConstructor = ui;
LongConstructor = l;
ULongConstructor = ul;
SByteEnumConstructor = sbe;
ByteEnumConstructor = be;
ShortEnumConstructor = se;
UShortEnumConstructor = use;
IntEnumConstructor = ie;
UIntEnumConstructor = uie;
LongEnumConstructor = le;
ULongEnumConstructor = ule;
CharConstructor = c;
BoolConstructor = bo;
FloatConstructor = f;
DoubleConstructor = d;
StringConstructor = str;
TypeConstructor = t;
ArrayConstructor = arr;
ObjectConstructor = obj;
}
public sbyte SByteConstructor;
public byte ByteConstructor;
public short ShortConstructor;
public ushort UShortConstructor;
public int IntConstructor;
public uint UIntConstructor;
public long LongConstructor;
public ulong ULongConstructor;
public SByteEnum SByteEnumConstructor;
public ByteEnum ByteEnumConstructor;
public ShortEnum ShortEnumConstructor;
public UShortEnum UShortEnumConstructor;
public IntEnum IntEnumConstructor;
public UIntEnum UIntEnumConstructor;
public LongEnum LongEnumConstructor;
public ULongEnum ULongEnumConstructor;
public char CharConstructor;
public bool BoolConstructor;
public float FloatConstructor;
public double DoubleConstructor;
public string StringConstructor;
public Type TypeConstructor;
public int[] ArrayConstructor;
public object ObjectConstructor;
public sbyte SByteProperty { get; set; }
public byte ByteProperty { get; set; }
public short ShortProperty { get; set; }
public ushort UShortProperty { get; set; }
public int IntProperty { get; set; }
public uint UIntProperty { get; set; }
public long LongProperty { get; set; }
public ulong ULongProperty { get; set; }
public SByteEnum SByteEnumProperty { get; set; }
public ByteEnum ByteEnumProperty { get; set; }
public ShortEnum ShortEnumProperty { get; set; }
public UShortEnum UShortEnumProperty { get; set; }
public IntEnum IntEnumProperty { get; set; }
public UIntEnum UIntEnumProperty { get; set; }
public LongEnum LongEnumProperty { get; set; }
public ULongEnum ULongEnumProperty { get; set; }
public char CharProperty { get; set; }
public bool BoolProperty { get; set; }
public float FloatProperty { get; set; }
public double DoubleProperty { get; set; }
public string StringProperty { get; set; }
public Type TypeProperty { get; set; }
public int[] ArrayProperty { get; set; }
public object ObjectProperty { get; set; }
public sbyte SByteField;
public byte ByteField;
public short ShortField;
public ushort UShortField;
public int IntField;
public uint UIntField;
public long LongField;
public ulong ULongField;
public SByteEnum SByteEnumField;
public ByteEnum ByteEnumField;
public ShortEnum ShortEnumField;
public UShortEnum UShortEnumField;
public IntEnum IntEnumField;
public UIntEnum UIntEnumField;
public LongEnum LongEnumField;
public ULongEnum ULongEnumField;
public char CharField;
public bool BoolField;
public float FloatField;
public double DoubleField;
public string StringField;
public Type TypeField;
public int[] ArrayField;
public object ObjectField;
}
public class IndexerAttribute : Attribute
{
public IndexerAttribute() { }
public string this[string s]
{
get { return s; }
set { }
}
}
public enum SByteEnum : sbyte { }
public enum ByteEnum : byte { }
public enum ShortEnum : short { }
public enum UShortEnum : ushort { }
public enum IntEnum : int { }
public enum UIntEnum : uint { }
public enum LongEnum : long { }
public enum ULongEnum : ulong { }
}
| -1 |
dotnet/runtime | 66,248 | Fix issues related to JsonSerializerOptions mutation and caching. | Second attempt at merging https://github.com/dotnet/runtime/pull/65863. Original PR introduced test failures in netfx and was reverted in https://github.com/dotnet/runtime/pull/66235.
cc @jkotas | eiriktsarpalis | 2022-03-05T19:59:48Z | 2022-03-07T19:44:14Z | 44ec3c9474b3a93eb4f71791a43d8bb5c0b08a58 | 8b4eaf94140f488743d0c78caa3afec3b9c5d789 | Fix issues related to JsonSerializerOptions mutation and caching.. Second attempt at merging https://github.com/dotnet/runtime/pull/65863. Original PR introduced test failures in netfx and was reverted in https://github.com/dotnet/runtime/pull/66235.
cc @jkotas | ./src/coreclr/nativeaot/System.Private.CoreLib/src/System/Runtime/InteropServices/PInvokeMarshal.Unix.cs | // Licensed to the .NET Foundation under one or more agreements.
// The .NET Foundation licenses this file to you under the MIT license.
using System.Runtime.CompilerServices;
using System.Security;
namespace System.Runtime.InteropServices
{
/// <summary>
    /// This PInvokeMarshal class should provide the full public Marshal
    /// implementation for all things related to P/Invoke marshalling
/// </summary>
public partial class PInvokeMarshal
{
public static void SaveLastError()
{
t_lastError = Interop.Sys.GetErrNo();
}
public static void ClearLastError()
{
Interop.Sys.SetErrNo(0);
}
#region String marshalling
public static unsafe int ConvertMultiByteToWideChar(byte* multiByteStr,
int multiByteLen,
char* wideCharStr,
int wideCharLen)
{
return System.Text.Encoding.UTF8.GetChars(multiByteStr, multiByteLen, wideCharStr, wideCharLen);
}
public static unsafe int ConvertWideCharToMultiByte(char* wideCharStr,
int wideCharLen,
byte* multiByteStr,
int multiByteLen,
bool bestFit,
bool throwOnUnmappableChar)
{
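            // Note: bestFit and throwOnUnmappableChar are ignored here; the Unix implementation always converts using UTF-8.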
return System.Text.Encoding.UTF8.GetBytes(wideCharStr, wideCharLen, multiByteStr, multiByteLen);
}
public static unsafe int ConvertWideCharToMultiByte(char* wideCharStr,
int wideCharLen,
byte* multiByteStr,
int multiByteLen)
{
return System.Text.Encoding.UTF8.GetBytes(wideCharStr, wideCharLen, multiByteStr, multiByteLen);
}
public static unsafe int GetByteCount(char* wideCharStr, int wideCharLen)
{
return System.Text.Encoding.UTF8.GetByteCount(wideCharStr, wideCharLen);
}
public static unsafe int GetCharCount(byte* multiByteStr, int multiByteLen)
{
return System.Text.Encoding.UTF8.GetCharCount(multiByteStr, multiByteLen);
}
#endregion
}
}
| // Licensed to the .NET Foundation under one or more agreements.
// The .NET Foundation licenses this file to you under the MIT license.
using System.Runtime.CompilerServices;
using System.Security;
namespace System.Runtime.InteropServices
{
/// <summary>
    /// This PInvokeMarshal class should provide the full public Marshal
    /// implementation for all things related to P/Invoke marshalling
/// </summary>
public partial class PInvokeMarshal
{
public static void SaveLastError()
{
t_lastError = Interop.Sys.GetErrNo();
}
public static void ClearLastError()
{
Interop.Sys.SetErrNo(0);
}
#region String marshalling
public static unsafe int ConvertMultiByteToWideChar(byte* multiByteStr,
int multiByteLen,
char* wideCharStr,
int wideCharLen)
{
return System.Text.Encoding.UTF8.GetChars(multiByteStr, multiByteLen, wideCharStr, wideCharLen);
}
public static unsafe int ConvertWideCharToMultiByte(char* wideCharStr,
int wideCharLen,
byte* multiByteStr,
int multiByteLen,
bool bestFit,
bool throwOnUnmappableChar)
{
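            // Note: bestFit and throwOnUnmappableChar are ignored here; the Unix implementation always converts using UTF-8.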
return System.Text.Encoding.UTF8.GetBytes(wideCharStr, wideCharLen, multiByteStr, multiByteLen);
}
public static unsafe int ConvertWideCharToMultiByte(char* wideCharStr,
int wideCharLen,
byte* multiByteStr,
int multiByteLen)
{
return System.Text.Encoding.UTF8.GetBytes(wideCharStr, wideCharLen, multiByteStr, multiByteLen);
}
public static unsafe int GetByteCount(char* wideCharStr, int wideCharLen)
{
return System.Text.Encoding.UTF8.GetByteCount(wideCharStr, wideCharLen);
}
public static unsafe int GetCharCount(byte* multiByteStr, int multiByteLen)
{
return System.Text.Encoding.UTF8.GetCharCount(multiByteStr, multiByteLen);
}
#endregion
}
}
| -1 |
dotnet/runtime | 66,248 | Fix issues related to JsonSerializerOptions mutation and caching. | Second attempt at merging https://github.com/dotnet/runtime/pull/65863. Original PR introduced test failures in netfx and was reverted in https://github.com/dotnet/runtime/pull/66235.
cc @jkotas | eiriktsarpalis | 2022-03-05T19:59:48Z | 2022-03-07T19:44:14Z | 44ec3c9474b3a93eb4f71791a43d8bb5c0b08a58 | 8b4eaf94140f488743d0c78caa3afec3b9c5d789 | Fix issues related to JsonSerializerOptions mutation and caching.. Second attempt at merging https://github.com/dotnet/runtime/pull/65863. Original PR introduced test failures in netfx and was reverted in https://github.com/dotnet/runtime/pull/66235.
cc @jkotas | ./src/tests/JIT/HardwareIntrinsics/General/Vector128_1/AsVector.Int64.cs | // Licensed to the .NET Foundation under one or more agreements.
// The .NET Foundation licenses this file to you under the MIT license.
/******************************************************************************
* This file is auto-generated from a template file by the GenerateTests.csx *
* script in tests\src\JIT\HardwareIntrinsics\General\Shared. In order to make *
* changes, please update the corresponding template and run according to the *
* directions listed in the file. *
******************************************************************************/
using System;
using System.Linq;
using System.Numerics;
using System.Runtime.CompilerServices;
using System.Runtime.InteropServices;
using System.Runtime.Intrinsics;
namespace JIT.HardwareIntrinsics.General
{
public static partial class Program
{
private static void AsVectorInt64()
{
var test = new VectorAs__AsVectorInt64();
// Validates basic functionality works
test.RunBasicScenario();
// Validates calling via reflection works
test.RunReflectionScenario();
if (!test.Succeeded)
{
throw new Exception("One or more scenarios did not complete as expected.");
}
}
}
public sealed unsafe class VectorAs__AsVectorInt64
{
private static readonly int LargestVectorSize = 16;
private static readonly int VectorElementCount = Unsafe.SizeOf<Vector128<Int64>>() / sizeof(Int64);
private static readonly int NumericsElementCount = Unsafe.SizeOf<Vector<Int64>>() / sizeof(Int64);
public bool Succeeded { get; set; } = true;
public void RunBasicScenario()
{
TestLibrary.TestFramework.BeginScenario(nameof(RunBasicScenario));
Vector128<Int64> value;
value = Vector128.Create((long)TestLibrary.Generator.GetInt64());
Vector<Int64> result = value.AsVector();
ValidateResult(result, value);
value = result.AsVector128();
ValidateResult(value, result);
}
public void RunReflectionScenario()
{
TestLibrary.TestFramework.BeginScenario(nameof(RunReflectionScenario));
Vector128<Int64> value;
value = Vector128.Create((long)TestLibrary.Generator.GetInt64());
object Result = typeof(Vector128)
.GetMethod(nameof(Vector128.AsVector))
.MakeGenericMethod(typeof(Int64))
.Invoke(null, new object[] { value });
ValidateResult((Vector<Int64>)(Result), value);
value = (Vector128<Int64>)typeof(Vector128)
.GetMethods()
.Where((methodInfo) => {
if (methodInfo.Name == nameof(Vector128.AsVector128))
{
var parameters = methodInfo.GetParameters();
return (parameters.Length == 1) &&
(parameters[0].ParameterType.IsGenericType) &&
(parameters[0].ParameterType.GetGenericTypeDefinition() == typeof(Vector<>));
}
return false;
})
.Single()
.MakeGenericMethod(typeof(Int64))
.Invoke(null, new object[] { Result });
ValidateResult(value, (Vector<Int64>)(Result));
}
private void ValidateResult(Vector<Int64> result, Vector128<Int64> value, [CallerMemberName] string method = "")
{
Int64[] resultElements = new Int64[NumericsElementCount];
Unsafe.WriteUnaligned(ref Unsafe.As<Int64, byte>(ref resultElements[0]), result);
Int64[] valueElements = new Int64[VectorElementCount];
Unsafe.WriteUnaligned(ref Unsafe.As<Int64, byte>(ref valueElements[0]), value);
ValidateResult(resultElements, valueElements, method);
}
private void ValidateResult(Vector128<Int64> result, Vector<Int64> value, [CallerMemberName] string method = "")
{
Int64[] resultElements = new Int64[VectorElementCount];
Unsafe.WriteUnaligned(ref Unsafe.As<Int64, byte>(ref resultElements[0]), result);
Int64[] valueElements = new Int64[NumericsElementCount];
Unsafe.WriteUnaligned(ref Unsafe.As<Int64, byte>(ref valueElements[0]), value);
ValidateResult(resultElements, valueElements, method);
}
private void ValidateResult(Int64[] resultElements, Int64[] valueElements, [CallerMemberName] string method = "")
{
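            // Vector<Int64> and Vector128<Int64> can differ in width depending on the hardware: the overlapping
            // elements must match, and any extra elements in the wider of the two results must be zero.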
bool succeeded = true;
if (resultElements.Length <= valueElements.Length)
{
for (var i = 0; i < resultElements.Length; i++)
{
if (resultElements[i] != valueElements[i])
{
succeeded = false;
break;
}
}
}
else
{
for (var i = 0; i < valueElements.Length; i++)
{
if (resultElements[i] != valueElements[i])
{
succeeded = false;
break;
}
}
for (var i = valueElements.Length; i < resultElements.Length; i++)
{
if (resultElements[i] != default)
{
succeeded = false;
break;
}
}
}
if (!succeeded)
{
TestLibrary.TestFramework.LogInformation($"Vector128<Int64>.AsVector: {method} failed:");
TestLibrary.TestFramework.LogInformation($" value: ({string.Join(", ", valueElements)})");
TestLibrary.TestFramework.LogInformation($" result: ({string.Join(", ", resultElements)})");
TestLibrary.TestFramework.LogInformation(string.Empty);
Succeeded = false;
}
}
}
}
| // Licensed to the .NET Foundation under one or more agreements.
// The .NET Foundation licenses this file to you under the MIT license.
/******************************************************************************
* This file is auto-generated from a template file by the GenerateTests.csx *
* script in tests\src\JIT\HardwareIntrinsics\General\Shared. In order to make *
* changes, please update the corresponding template and run according to the *
* directions listed in the file. *
******************************************************************************/
using System;
using System.Linq;
using System.Numerics;
using System.Runtime.CompilerServices;
using System.Runtime.InteropServices;
using System.Runtime.Intrinsics;
namespace JIT.HardwareIntrinsics.General
{
public static partial class Program
{
private static void AsVectorInt64()
{
var test = new VectorAs__AsVectorInt64();
// Validates basic functionality works
test.RunBasicScenario();
// Validates calling via reflection works
test.RunReflectionScenario();
if (!test.Succeeded)
{
throw new Exception("One or more scenarios did not complete as expected.");
}
}
}
public sealed unsafe class VectorAs__AsVectorInt64
{
private static readonly int LargestVectorSize = 16;
private static readonly int VectorElementCount = Unsafe.SizeOf<Vector128<Int64>>() / sizeof(Int64);
private static readonly int NumericsElementCount = Unsafe.SizeOf<Vector<Int64>>() / sizeof(Int64);
public bool Succeeded { get; set; } = true;
public void RunBasicScenario()
{
TestLibrary.TestFramework.BeginScenario(nameof(RunBasicScenario));
Vector128<Int64> value;
value = Vector128.Create((long)TestLibrary.Generator.GetInt64());
Vector<Int64> result = value.AsVector();
ValidateResult(result, value);
value = result.AsVector128();
ValidateResult(value, result);
}
public void RunReflectionScenario()
{
TestLibrary.TestFramework.BeginScenario(nameof(RunReflectionScenario));
Vector128<Int64> value;
value = Vector128.Create((long)TestLibrary.Generator.GetInt64());
object Result = typeof(Vector128)
.GetMethod(nameof(Vector128.AsVector))
.MakeGenericMethod(typeof(Int64))
.Invoke(null, new object[] { value });
ValidateResult((Vector<Int64>)(Result), value);
value = (Vector128<Int64>)typeof(Vector128)
.GetMethods()
.Where((methodInfo) => {
if (methodInfo.Name == nameof(Vector128.AsVector128))
{
var parameters = methodInfo.GetParameters();
return (parameters.Length == 1) &&
(parameters[0].ParameterType.IsGenericType) &&
(parameters[0].ParameterType.GetGenericTypeDefinition() == typeof(Vector<>));
}
return false;
})
.Single()
.MakeGenericMethod(typeof(Int64))
.Invoke(null, new object[] { Result });
ValidateResult(value, (Vector<Int64>)(Result));
}
private void ValidateResult(Vector<Int64> result, Vector128<Int64> value, [CallerMemberName] string method = "")
{
Int64[] resultElements = new Int64[NumericsElementCount];
Unsafe.WriteUnaligned(ref Unsafe.As<Int64, byte>(ref resultElements[0]), result);
Int64[] valueElements = new Int64[VectorElementCount];
Unsafe.WriteUnaligned(ref Unsafe.As<Int64, byte>(ref valueElements[0]), value);
ValidateResult(resultElements, valueElements, method);
}
private void ValidateResult(Vector128<Int64> result, Vector<Int64> value, [CallerMemberName] string method = "")
{
Int64[] resultElements = new Int64[VectorElementCount];
Unsafe.WriteUnaligned(ref Unsafe.As<Int64, byte>(ref resultElements[0]), result);
Int64[] valueElements = new Int64[NumericsElementCount];
Unsafe.WriteUnaligned(ref Unsafe.As<Int64, byte>(ref valueElements[0]), value);
ValidateResult(resultElements, valueElements, method);
}
private void ValidateResult(Int64[] resultElements, Int64[] valueElements, [CallerMemberName] string method = "")
{
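            // Vector<Int64> and Vector128<Int64> can differ in width depending on the hardware: the overlapping
            // elements must match, and any extra elements in the wider of the two results must be zero.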
bool succeeded = true;
if (resultElements.Length <= valueElements.Length)
{
for (var i = 0; i < resultElements.Length; i++)
{
if (resultElements[i] != valueElements[i])
{
succeeded = false;
break;
}
}
}
else
{
for (var i = 0; i < valueElements.Length; i++)
{
if (resultElements[i] != valueElements[i])
{
succeeded = false;
break;
}
}
for (var i = valueElements.Length; i < resultElements.Length; i++)
{
if (resultElements[i] != default)
{
succeeded = false;
break;
}
}
}
if (!succeeded)
{
TestLibrary.TestFramework.LogInformation($"Vector128<Int64>.AsVector: {method} failed:");
TestLibrary.TestFramework.LogInformation($" value: ({string.Join(", ", valueElements)})");
TestLibrary.TestFramework.LogInformation($" result: ({string.Join(", ", resultElements)})");
TestLibrary.TestFramework.LogInformation(string.Empty);
Succeeded = false;
}
}
}
}
| -1 |
dotnet/runtime | 66,248 | Fix issues related to JsonSerializerOptions mutation and caching. | Second attempt at merging https://github.com/dotnet/runtime/pull/65863. Original PR introduced test failures in netfx and was reverted in https://github.com/dotnet/runtime/pull/66235.
cc @jkotas | eiriktsarpalis | 2022-03-05T19:59:48Z | 2022-03-07T19:44:14Z | 44ec3c9474b3a93eb4f71791a43d8bb5c0b08a58 | 8b4eaf94140f488743d0c78caa3afec3b9c5d789 | Fix issues related to JsonSerializerOptions mutation and caching.. Second attempt at merging https://github.com/dotnet/runtime/pull/65863. Original PR introduced test failures in netfx and was reverted in https://github.com/dotnet/runtime/pull/66235.
cc @jkotas | ./src/tests/JIT/HardwareIntrinsics/Arm/AdvSimd.Arm64/ZipHigh.Vector128.SByte.cs | // Licensed to the .NET Foundation under one or more agreements.
// The .NET Foundation licenses this file to you under the MIT license.
/******************************************************************************
* This file is auto-generated from a template file by the GenerateTests.csx *
* script in tests\src\JIT\HardwareIntrinsics.Arm\Shared. In order to make *
* changes, please update the corresponding template and run according to the *
* directions listed in the file. *
******************************************************************************/
using System;
using System.Runtime.CompilerServices;
using System.Runtime.InteropServices;
using System.Runtime.Intrinsics;
using System.Runtime.Intrinsics.Arm;
namespace JIT.HardwareIntrinsics.Arm
{
public static partial class Program
{
private static void ZipHigh_Vector128_SByte()
{
var test = new SimpleBinaryOpTest__ZipHigh_Vector128_SByte();
if (test.IsSupported)
{
// Validates basic functionality works, using Unsafe.Read
test.RunBasicScenario_UnsafeRead();
if (AdvSimd.IsSupported)
{
// Validates basic functionality works, using Load
test.RunBasicScenario_Load();
}
// Validates calling via reflection works, using Unsafe.Read
test.RunReflectionScenario_UnsafeRead();
if (AdvSimd.IsSupported)
{
// Validates calling via reflection works, using Load
test.RunReflectionScenario_Load();
}
// Validates passing a static member works
test.RunClsVarScenario();
if (AdvSimd.IsSupported)
{
// Validates passing a static member works, using pinning and Load
test.RunClsVarScenario_Load();
}
// Validates passing a local works, using Unsafe.Read
test.RunLclVarScenario_UnsafeRead();
if (AdvSimd.IsSupported)
{
// Validates passing a local works, using Load
test.RunLclVarScenario_Load();
}
// Validates passing the field of a local class works
test.RunClassLclFldScenario();
if (AdvSimd.IsSupported)
{
// Validates passing the field of a local class works, using pinning and Load
test.RunClassLclFldScenario_Load();
}
// Validates passing an instance member of a class works
test.RunClassFldScenario();
if (AdvSimd.IsSupported)
{
// Validates passing an instance member of a class works, using pinning and Load
test.RunClassFldScenario_Load();
}
// Validates passing the field of a local struct works
test.RunStructLclFldScenario();
if (AdvSimd.IsSupported)
{
// Validates passing the field of a local struct works, using pinning and Load
test.RunStructLclFldScenario_Load();
}
// Validates passing an instance member of a struct works
test.RunStructFldScenario();
if (AdvSimd.IsSupported)
{
// Validates passing an instance member of a struct works, using pinning and Load
test.RunStructFldScenario_Load();
}
}
else
{
// Validates we throw on unsupported hardware
test.RunUnsupportedScenario();
}
if (!test.Succeeded)
{
throw new Exception("One or more scenarios did not complete as expected.");
}
}
}
public sealed unsafe class SimpleBinaryOpTest__ZipHigh_Vector128_SByte
{
private struct DataTable
{
private byte[] inArray1;
private byte[] inArray2;
private byte[] outArray;
private GCHandle inHandle1;
private GCHandle inHandle2;
private GCHandle outHandle;
private ulong alignment;
public DataTable(SByte[] inArray1, SByte[] inArray2, SByte[] outArray, int alignment)
{
int sizeOfinArray1 = inArray1.Length * Unsafe.SizeOf<SByte>();
int sizeOfinArray2 = inArray2.Length * Unsafe.SizeOf<SByte>();
int sizeOfoutArray = outArray.Length * Unsafe.SizeOf<SByte>();
if ((alignment != 16 && alignment != 8) || (alignment * 2) < sizeOfinArray1 || (alignment * 2) < sizeOfinArray2 || (alignment * 2) < sizeOfoutArray)
{
throw new ArgumentException("Invalid value of alignment");
}
this.inArray1 = new byte[alignment * 2];
this.inArray2 = new byte[alignment * 2];
this.outArray = new byte[alignment * 2];
this.inHandle1 = GCHandle.Alloc(this.inArray1, GCHandleType.Pinned);
this.inHandle2 = GCHandle.Alloc(this.inArray2, GCHandleType.Pinned);
this.outHandle = GCHandle.Alloc(this.outArray, GCHandleType.Pinned);
this.alignment = (ulong)alignment;
Unsafe.CopyBlockUnaligned(ref Unsafe.AsRef<byte>(inArray1Ptr), ref Unsafe.As<SByte, byte>(ref inArray1[0]), (uint)sizeOfinArray1);
Unsafe.CopyBlockUnaligned(ref Unsafe.AsRef<byte>(inArray2Ptr), ref Unsafe.As<SByte, byte>(ref inArray2[0]), (uint)sizeOfinArray2);
}
public void* inArray1Ptr => Align((byte*)(inHandle1.AddrOfPinnedObject().ToPointer()), alignment);
public void* inArray2Ptr => Align((byte*)(inHandle2.AddrOfPinnedObject().ToPointer()), alignment);
public void* outArrayPtr => Align((byte*)(outHandle.AddrOfPinnedObject().ToPointer()), alignment);
public void Dispose()
{
inHandle1.Free();
inHandle2.Free();
outHandle.Free();
}
private static unsafe void* Align(byte* buffer, ulong expectedAlignment)
{
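                // Round the pointer up to the next multiple of expectedAlignment (expected to be a power of two).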
return (void*)(((ulong)buffer + expectedAlignment - 1) & ~(expectedAlignment - 1));
}
}
private struct TestStruct
{
public Vector128<SByte> _fld1;
public Vector128<SByte> _fld2;
public static TestStruct Create()
{
var testStruct = new TestStruct();
for (var i = 0; i < Op1ElementCount; i++) { _data1[i] = TestLibrary.Generator.GetSByte(); }
Unsafe.CopyBlockUnaligned(ref Unsafe.As<Vector128<SByte>, byte>(ref testStruct._fld1), ref Unsafe.As<SByte, byte>(ref _data1[0]), (uint)Unsafe.SizeOf<Vector128<SByte>>());
for (var i = 0; i < Op2ElementCount; i++) { _data2[i] = TestLibrary.Generator.GetSByte(); }
Unsafe.CopyBlockUnaligned(ref Unsafe.As<Vector128<SByte>, byte>(ref testStruct._fld2), ref Unsafe.As<SByte, byte>(ref _data2[0]), (uint)Unsafe.SizeOf<Vector128<SByte>>());
return testStruct;
}
public void RunStructFldScenario(SimpleBinaryOpTest__ZipHigh_Vector128_SByte testClass)
{
var result = AdvSimd.Arm64.ZipHigh(_fld1, _fld2);
Unsafe.Write(testClass._dataTable.outArrayPtr, result);
testClass.ValidateResult(_fld1, _fld2, testClass._dataTable.outArrayPtr);
}
public void RunStructFldScenario_Load(SimpleBinaryOpTest__ZipHigh_Vector128_SByte testClass)
{
fixed (Vector128<SByte>* pFld1 = &_fld1)
fixed (Vector128<SByte>* pFld2 = &_fld2)
{
var result = AdvSimd.Arm64.ZipHigh(
AdvSimd.LoadVector128((SByte*)(pFld1)),
AdvSimd.LoadVector128((SByte*)(pFld2))
);
Unsafe.Write(testClass._dataTable.outArrayPtr, result);
testClass.ValidateResult(_fld1, _fld2, testClass._dataTable.outArrayPtr);
}
}
}
private static readonly int LargestVectorSize = 16;
private static readonly int Op1ElementCount = Unsafe.SizeOf<Vector128<SByte>>() / sizeof(SByte);
private static readonly int Op2ElementCount = Unsafe.SizeOf<Vector128<SByte>>() / sizeof(SByte);
private static readonly int RetElementCount = Unsafe.SizeOf<Vector128<SByte>>() / sizeof(SByte);
private static SByte[] _data1 = new SByte[Op1ElementCount];
private static SByte[] _data2 = new SByte[Op2ElementCount];
private static Vector128<SByte> _clsVar1;
private static Vector128<SByte> _clsVar2;
private Vector128<SByte> _fld1;
private Vector128<SByte> _fld2;
private DataTable _dataTable;
static SimpleBinaryOpTest__ZipHigh_Vector128_SByte()
{
for (var i = 0; i < Op1ElementCount; i++) { _data1[i] = TestLibrary.Generator.GetSByte(); }
Unsafe.CopyBlockUnaligned(ref Unsafe.As<Vector128<SByte>, byte>(ref _clsVar1), ref Unsafe.As<SByte, byte>(ref _data1[0]), (uint)Unsafe.SizeOf<Vector128<SByte>>());
for (var i = 0; i < Op2ElementCount; i++) { _data2[i] = TestLibrary.Generator.GetSByte(); }
Unsafe.CopyBlockUnaligned(ref Unsafe.As<Vector128<SByte>, byte>(ref _clsVar2), ref Unsafe.As<SByte, byte>(ref _data2[0]), (uint)Unsafe.SizeOf<Vector128<SByte>>());
}
public SimpleBinaryOpTest__ZipHigh_Vector128_SByte()
{
Succeeded = true;
for (var i = 0; i < Op1ElementCount; i++) { _data1[i] = TestLibrary.Generator.GetSByte(); }
Unsafe.CopyBlockUnaligned(ref Unsafe.As<Vector128<SByte>, byte>(ref _fld1), ref Unsafe.As<SByte, byte>(ref _data1[0]), (uint)Unsafe.SizeOf<Vector128<SByte>>());
for (var i = 0; i < Op2ElementCount; i++) { _data2[i] = TestLibrary.Generator.GetSByte(); }
Unsafe.CopyBlockUnaligned(ref Unsafe.As<Vector128<SByte>, byte>(ref _fld2), ref Unsafe.As<SByte, byte>(ref _data2[0]), (uint)Unsafe.SizeOf<Vector128<SByte>>());
for (var i = 0; i < Op1ElementCount; i++) { _data1[i] = TestLibrary.Generator.GetSByte(); }
for (var i = 0; i < Op2ElementCount; i++) { _data2[i] = TestLibrary.Generator.GetSByte(); }
_dataTable = new DataTable(_data1, _data2, new SByte[RetElementCount], LargestVectorSize);
}
public bool IsSupported => AdvSimd.Arm64.IsSupported;
public bool Succeeded { get; set; }
public void RunBasicScenario_UnsafeRead()
{
TestLibrary.TestFramework.BeginScenario(nameof(RunBasicScenario_UnsafeRead));
var result = AdvSimd.Arm64.ZipHigh(
Unsafe.Read<Vector128<SByte>>(_dataTable.inArray1Ptr),
Unsafe.Read<Vector128<SByte>>(_dataTable.inArray2Ptr)
);
Unsafe.Write(_dataTable.outArrayPtr, result);
ValidateResult(_dataTable.inArray1Ptr, _dataTable.inArray2Ptr, _dataTable.outArrayPtr);
}
public void RunBasicScenario_Load()
{
TestLibrary.TestFramework.BeginScenario(nameof(RunBasicScenario_Load));
var result = AdvSimd.Arm64.ZipHigh(
AdvSimd.LoadVector128((SByte*)(_dataTable.inArray1Ptr)),
AdvSimd.LoadVector128((SByte*)(_dataTable.inArray2Ptr))
);
Unsafe.Write(_dataTable.outArrayPtr, result);
ValidateResult(_dataTable.inArray1Ptr, _dataTable.inArray2Ptr, _dataTable.outArrayPtr);
}
public void RunReflectionScenario_UnsafeRead()
{
TestLibrary.TestFramework.BeginScenario(nameof(RunReflectionScenario_UnsafeRead));
var result = typeof(AdvSimd.Arm64).GetMethod(nameof(AdvSimd.Arm64.ZipHigh), new Type[] { typeof(Vector128<SByte>), typeof(Vector128<SByte>) })
.Invoke(null, new object[] {
Unsafe.Read<Vector128<SByte>>(_dataTable.inArray1Ptr),
Unsafe.Read<Vector128<SByte>>(_dataTable.inArray2Ptr)
});
Unsafe.Write(_dataTable.outArrayPtr, (Vector128<SByte>)(result));
ValidateResult(_dataTable.inArray1Ptr, _dataTable.inArray2Ptr, _dataTable.outArrayPtr);
}
public void RunReflectionScenario_Load()
{
TestLibrary.TestFramework.BeginScenario(nameof(RunReflectionScenario_Load));
var result = typeof(AdvSimd.Arm64).GetMethod(nameof(AdvSimd.Arm64.ZipHigh), new Type[] { typeof(Vector128<SByte>), typeof(Vector128<SByte>) })
.Invoke(null, new object[] {
AdvSimd.LoadVector128((SByte*)(_dataTable.inArray1Ptr)),
AdvSimd.LoadVector128((SByte*)(_dataTable.inArray2Ptr))
});
Unsafe.Write(_dataTable.outArrayPtr, (Vector128<SByte>)(result));
ValidateResult(_dataTable.inArray1Ptr, _dataTable.inArray2Ptr, _dataTable.outArrayPtr);
}
public void RunClsVarScenario()
{
TestLibrary.TestFramework.BeginScenario(nameof(RunClsVarScenario));
var result = AdvSimd.Arm64.ZipHigh(
_clsVar1,
_clsVar2
);
Unsafe.Write(_dataTable.outArrayPtr, result);
ValidateResult(_clsVar1, _clsVar2, _dataTable.outArrayPtr);
}
public void RunClsVarScenario_Load()
{
TestLibrary.TestFramework.BeginScenario(nameof(RunClsVarScenario_Load));
fixed (Vector128<SByte>* pClsVar1 = &_clsVar1)
fixed (Vector128<SByte>* pClsVar2 = &_clsVar2)
{
var result = AdvSimd.Arm64.ZipHigh(
AdvSimd.LoadVector128((SByte*)(pClsVar1)),
AdvSimd.LoadVector128((SByte*)(pClsVar2))
);
Unsafe.Write(_dataTable.outArrayPtr, result);
ValidateResult(_clsVar1, _clsVar2, _dataTable.outArrayPtr);
}
}
public void RunLclVarScenario_UnsafeRead()
{
TestLibrary.TestFramework.BeginScenario(nameof(RunLclVarScenario_UnsafeRead));
var op1 = Unsafe.Read<Vector128<SByte>>(_dataTable.inArray1Ptr);
var op2 = Unsafe.Read<Vector128<SByte>>(_dataTable.inArray2Ptr);
var result = AdvSimd.Arm64.ZipHigh(op1, op2);
Unsafe.Write(_dataTable.outArrayPtr, result);
ValidateResult(op1, op2, _dataTable.outArrayPtr);
}
public void RunLclVarScenario_Load()
{
TestLibrary.TestFramework.BeginScenario(nameof(RunLclVarScenario_Load));
var op1 = AdvSimd.LoadVector128((SByte*)(_dataTable.inArray1Ptr));
var op2 = AdvSimd.LoadVector128((SByte*)(_dataTable.inArray2Ptr));
var result = AdvSimd.Arm64.ZipHigh(op1, op2);
Unsafe.Write(_dataTable.outArrayPtr, result);
ValidateResult(op1, op2, _dataTable.outArrayPtr);
}
public void RunClassLclFldScenario()
{
TestLibrary.TestFramework.BeginScenario(nameof(RunClassLclFldScenario));
var test = new SimpleBinaryOpTest__ZipHigh_Vector128_SByte();
var result = AdvSimd.Arm64.ZipHigh(test._fld1, test._fld2);
Unsafe.Write(_dataTable.outArrayPtr, result);
ValidateResult(test._fld1, test._fld2, _dataTable.outArrayPtr);
}
public void RunClassLclFldScenario_Load()
{
TestLibrary.TestFramework.BeginScenario(nameof(RunClassLclFldScenario_Load));
var test = new SimpleBinaryOpTest__ZipHigh_Vector128_SByte();
fixed (Vector128<SByte>* pFld1 = &test._fld1)
fixed (Vector128<SByte>* pFld2 = &test._fld2)
{
var result = AdvSimd.Arm64.ZipHigh(
AdvSimd.LoadVector128((SByte*)(pFld1)),
AdvSimd.LoadVector128((SByte*)(pFld2))
);
Unsafe.Write(_dataTable.outArrayPtr, result);
ValidateResult(test._fld1, test._fld2, _dataTable.outArrayPtr);
}
}
public void RunClassFldScenario()
{
TestLibrary.TestFramework.BeginScenario(nameof(RunClassFldScenario));
var result = AdvSimd.Arm64.ZipHigh(_fld1, _fld2);
Unsafe.Write(_dataTable.outArrayPtr, result);
ValidateResult(_fld1, _fld2, _dataTable.outArrayPtr);
}
public void RunClassFldScenario_Load()
{
TestLibrary.TestFramework.BeginScenario(nameof(RunClassFldScenario_Load));
fixed (Vector128<SByte>* pFld1 = &_fld1)
fixed (Vector128<SByte>* pFld2 = &_fld2)
{
var result = AdvSimd.Arm64.ZipHigh(
AdvSimd.LoadVector128((SByte*)(pFld1)),
AdvSimd.LoadVector128((SByte*)(pFld2))
);
Unsafe.Write(_dataTable.outArrayPtr, result);
ValidateResult(_fld1, _fld2, _dataTable.outArrayPtr);
}
}
public void RunStructLclFldScenario()
{
TestLibrary.TestFramework.BeginScenario(nameof(RunStructLclFldScenario));
var test = TestStruct.Create();
var result = AdvSimd.Arm64.ZipHigh(test._fld1, test._fld2);
Unsafe.Write(_dataTable.outArrayPtr, result);
ValidateResult(test._fld1, test._fld2, _dataTable.outArrayPtr);
}
public void RunStructLclFldScenario_Load()
{
TestLibrary.TestFramework.BeginScenario(nameof(RunStructLclFldScenario_Load));
var test = TestStruct.Create();
var result = AdvSimd.Arm64.ZipHigh(
AdvSimd.LoadVector128((SByte*)(&test._fld1)),
AdvSimd.LoadVector128((SByte*)(&test._fld2))
);
Unsafe.Write(_dataTable.outArrayPtr, result);
ValidateResult(test._fld1, test._fld2, _dataTable.outArrayPtr);
}
public void RunStructFldScenario()
{
TestLibrary.TestFramework.BeginScenario(nameof(RunStructFldScenario));
var test = TestStruct.Create();
test.RunStructFldScenario(this);
}
public void RunStructFldScenario_Load()
{
TestLibrary.TestFramework.BeginScenario(nameof(RunStructFldScenario_Load));
var test = TestStruct.Create();
test.RunStructFldScenario_Load(this);
}
public void RunUnsupportedScenario()
{
TestLibrary.TestFramework.BeginScenario(nameof(RunUnsupportedScenario));
bool succeeded = false;
try
{
RunBasicScenario_UnsafeRead();
}
catch (PlatformNotSupportedException)
{
succeeded = true;
}
if (!succeeded)
{
Succeeded = false;
}
}
private void ValidateResult(Vector128<SByte> op1, Vector128<SByte> op2, void* result, [CallerMemberName] string method = "")
{
SByte[] inArray1 = new SByte[Op1ElementCount];
SByte[] inArray2 = new SByte[Op2ElementCount];
SByte[] outArray = new SByte[RetElementCount];
Unsafe.WriteUnaligned(ref Unsafe.As<SByte, byte>(ref inArray1[0]), op1);
Unsafe.WriteUnaligned(ref Unsafe.As<SByte, byte>(ref inArray2[0]), op2);
Unsafe.CopyBlockUnaligned(ref Unsafe.As<SByte, byte>(ref outArray[0]), ref Unsafe.AsRef<byte>(result), (uint)Unsafe.SizeOf<Vector128<SByte>>());
ValidateResult(inArray1, inArray2, outArray, method);
}
private void ValidateResult(void* op1, void* op2, void* result, [CallerMemberName] string method = "")
{
SByte[] inArray1 = new SByte[Op1ElementCount];
SByte[] inArray2 = new SByte[Op2ElementCount];
SByte[] outArray = new SByte[RetElementCount];
Unsafe.CopyBlockUnaligned(ref Unsafe.As<SByte, byte>(ref inArray1[0]), ref Unsafe.AsRef<byte>(op1), (uint)Unsafe.SizeOf<Vector128<SByte>>());
Unsafe.CopyBlockUnaligned(ref Unsafe.As<SByte, byte>(ref inArray2[0]), ref Unsafe.AsRef<byte>(op2), (uint)Unsafe.SizeOf<Vector128<SByte>>());
Unsafe.CopyBlockUnaligned(ref Unsafe.As<SByte, byte>(ref outArray[0]), ref Unsafe.AsRef<byte>(result), (uint)Unsafe.SizeOf<Vector128<SByte>>());
ValidateResult(inArray1, inArray2, outArray, method);
}
private void ValidateResult(SByte[] left, SByte[] right, SByte[] result, [CallerMemberName] string method = "")
{
bool succeeded = true;
int index = 0;
int half = RetElementCount / 2;
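            // ZipHigh interleaves the upper halves of the two inputs:
            // result = { left[half], right[half], left[half + 1], right[half + 1], ... }.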
for (var i = 0; i < RetElementCount; i+=2, index++)
{
if (result[i] != left[index+half] || result[i+1] != right[index+half])
{
succeeded = false;
break;
}
}
if (!succeeded)
{
TestLibrary.TestFramework.LogInformation($"{nameof(AdvSimd.Arm64)}.{nameof(AdvSimd.Arm64.ZipHigh)}<SByte>(Vector128<SByte>, Vector128<SByte>): {method} failed:");
TestLibrary.TestFramework.LogInformation($" left: ({string.Join(", ", left)})");
TestLibrary.TestFramework.LogInformation($" right: ({string.Join(", ", right)})");
TestLibrary.TestFramework.LogInformation($" result: ({string.Join(", ", result)})");
TestLibrary.TestFramework.LogInformation(string.Empty);
Succeeded = false;
}
}
}
}
| // Licensed to the .NET Foundation under one or more agreements.
// The .NET Foundation licenses this file to you under the MIT license.
/******************************************************************************
* This file is auto-generated from a template file by the GenerateTests.csx *
* script in tests\src\JIT\HardwareIntrinsics.Arm\Shared. In order to make *
* changes, please update the corresponding template and run according to the *
* directions listed in the file. *
******************************************************************************/
using System;
using System.Runtime.CompilerServices;
using System.Runtime.InteropServices;
using System.Runtime.Intrinsics;
using System.Runtime.Intrinsics.Arm;
namespace JIT.HardwareIntrinsics.Arm
{
public static partial class Program
{
private static void ZipHigh_Vector128_SByte()
{
var test = new SimpleBinaryOpTest__ZipHigh_Vector128_SByte();
if (test.IsSupported)
{
// Validates basic functionality works, using Unsafe.Read
test.RunBasicScenario_UnsafeRead();
if (AdvSimd.IsSupported)
{
// Validates basic functionality works, using Load
test.RunBasicScenario_Load();
}
// Validates calling via reflection works, using Unsafe.Read
test.RunReflectionScenario_UnsafeRead();
if (AdvSimd.IsSupported)
{
// Validates calling via reflection works, using Load
test.RunReflectionScenario_Load();
}
// Validates passing a static member works
test.RunClsVarScenario();
if (AdvSimd.IsSupported)
{
// Validates passing a static member works, using pinning and Load
test.RunClsVarScenario_Load();
}
// Validates passing a local works, using Unsafe.Read
test.RunLclVarScenario_UnsafeRead();
if (AdvSimd.IsSupported)
{
// Validates passing a local works, using Load
test.RunLclVarScenario_Load();
}
// Validates passing the field of a local class works
test.RunClassLclFldScenario();
if (AdvSimd.IsSupported)
{
// Validates passing the field of a local class works, using pinning and Load
test.RunClassLclFldScenario_Load();
}
// Validates passing an instance member of a class works
test.RunClassFldScenario();
if (AdvSimd.IsSupported)
{
// Validates passing an instance member of a class works, using pinning and Load
test.RunClassFldScenario_Load();
}
// Validates passing the field of a local struct works
test.RunStructLclFldScenario();
if (AdvSimd.IsSupported)
{
// Validates passing the field of a local struct works, using pinning and Load
test.RunStructLclFldScenario_Load();
}
// Validates passing an instance member of a struct works
test.RunStructFldScenario();
if (AdvSimd.IsSupported)
{
// Validates passing an instance member of a struct works, using pinning and Load
test.RunStructFldScenario_Load();
}
}
else
{
// Validates we throw on unsupported hardware
test.RunUnsupportedScenario();
}
if (!test.Succeeded)
{
throw new Exception("One or more scenarios did not complete as expected.");
}
}
}
public sealed unsafe class SimpleBinaryOpTest__ZipHigh_Vector128_SByte
{
private struct DataTable
{
private byte[] inArray1;
private byte[] inArray2;
private byte[] outArray;
private GCHandle inHandle1;
private GCHandle inHandle2;
private GCHandle outHandle;
private ulong alignment;
public DataTable(SByte[] inArray1, SByte[] inArray2, SByte[] outArray, int alignment)
{
int sizeOfinArray1 = inArray1.Length * Unsafe.SizeOf<SByte>();
int sizeOfinArray2 = inArray2.Length * Unsafe.SizeOf<SByte>();
int sizeOfoutArray = outArray.Length * Unsafe.SizeOf<SByte>();
if ((alignment != 16 && alignment != 8) || (alignment * 2) < sizeOfinArray1 || (alignment * 2) < sizeOfinArray2 || (alignment * 2) < sizeOfoutArray)
{
throw new ArgumentException("Invalid value of alignment");
}
this.inArray1 = new byte[alignment * 2];
this.inArray2 = new byte[alignment * 2];
this.outArray = new byte[alignment * 2];
this.inHandle1 = GCHandle.Alloc(this.inArray1, GCHandleType.Pinned);
this.inHandle2 = GCHandle.Alloc(this.inArray2, GCHandleType.Pinned);
this.outHandle = GCHandle.Alloc(this.outArray, GCHandleType.Pinned);
this.alignment = (ulong)alignment;
Unsafe.CopyBlockUnaligned(ref Unsafe.AsRef<byte>(inArray1Ptr), ref Unsafe.As<SByte, byte>(ref inArray1[0]), (uint)sizeOfinArray1);
Unsafe.CopyBlockUnaligned(ref Unsafe.AsRef<byte>(inArray2Ptr), ref Unsafe.As<SByte, byte>(ref inArray2[0]), (uint)sizeOfinArray2);
}
public void* inArray1Ptr => Align((byte*)(inHandle1.AddrOfPinnedObject().ToPointer()), alignment);
public void* inArray2Ptr => Align((byte*)(inHandle2.AddrOfPinnedObject().ToPointer()), alignment);
public void* outArrayPtr => Align((byte*)(outHandle.AddrOfPinnedObject().ToPointer()), alignment);
public void Dispose()
{
inHandle1.Free();
inHandle2.Free();
outHandle.Free();
}
private static unsafe void* Align(byte* buffer, ulong expectedAlignment)
{
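                // Round the pointer up to the next multiple of expectedAlignment (expected to be a power of two).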
return (void*)(((ulong)buffer + expectedAlignment - 1) & ~(expectedAlignment - 1));
}
}
private struct TestStruct
{
public Vector128<SByte> _fld1;
public Vector128<SByte> _fld2;
public static TestStruct Create()
{
var testStruct = new TestStruct();
for (var i = 0; i < Op1ElementCount; i++) { _data1[i] = TestLibrary.Generator.GetSByte(); }
Unsafe.CopyBlockUnaligned(ref Unsafe.As<Vector128<SByte>, byte>(ref testStruct._fld1), ref Unsafe.As<SByte, byte>(ref _data1[0]), (uint)Unsafe.SizeOf<Vector128<SByte>>());
for (var i = 0; i < Op2ElementCount; i++) { _data2[i] = TestLibrary.Generator.GetSByte(); }
Unsafe.CopyBlockUnaligned(ref Unsafe.As<Vector128<SByte>, byte>(ref testStruct._fld2), ref Unsafe.As<SByte, byte>(ref _data2[0]), (uint)Unsafe.SizeOf<Vector128<SByte>>());
return testStruct;
}
public void RunStructFldScenario(SimpleBinaryOpTest__ZipHigh_Vector128_SByte testClass)
{
var result = AdvSimd.Arm64.ZipHigh(_fld1, _fld2);
Unsafe.Write(testClass._dataTable.outArrayPtr, result);
testClass.ValidateResult(_fld1, _fld2, testClass._dataTable.outArrayPtr);
}
public void RunStructFldScenario_Load(SimpleBinaryOpTest__ZipHigh_Vector128_SByte testClass)
{
fixed (Vector128<SByte>* pFld1 = &_fld1)
fixed (Vector128<SByte>* pFld2 = &_fld2)
{
var result = AdvSimd.Arm64.ZipHigh(
AdvSimd.LoadVector128((SByte*)(pFld1)),
AdvSimd.LoadVector128((SByte*)(pFld2))
);
Unsafe.Write(testClass._dataTable.outArrayPtr, result);
testClass.ValidateResult(_fld1, _fld2, testClass._dataTable.outArrayPtr);
}
}
}
private static readonly int LargestVectorSize = 16;
private static readonly int Op1ElementCount = Unsafe.SizeOf<Vector128<SByte>>() / sizeof(SByte);
private static readonly int Op2ElementCount = Unsafe.SizeOf<Vector128<SByte>>() / sizeof(SByte);
private static readonly int RetElementCount = Unsafe.SizeOf<Vector128<SByte>>() / sizeof(SByte);
private static SByte[] _data1 = new SByte[Op1ElementCount];
private static SByte[] _data2 = new SByte[Op2ElementCount];
private static Vector128<SByte> _clsVar1;
private static Vector128<SByte> _clsVar2;
private Vector128<SByte> _fld1;
private Vector128<SByte> _fld2;
private DataTable _dataTable;
static SimpleBinaryOpTest__ZipHigh_Vector128_SByte()
{
for (var i = 0; i < Op1ElementCount; i++) { _data1[i] = TestLibrary.Generator.GetSByte(); }
Unsafe.CopyBlockUnaligned(ref Unsafe.As<Vector128<SByte>, byte>(ref _clsVar1), ref Unsafe.As<SByte, byte>(ref _data1[0]), (uint)Unsafe.SizeOf<Vector128<SByte>>());
for (var i = 0; i < Op2ElementCount; i++) { _data2[i] = TestLibrary.Generator.GetSByte(); }
Unsafe.CopyBlockUnaligned(ref Unsafe.As<Vector128<SByte>, byte>(ref _clsVar2), ref Unsafe.As<SByte, byte>(ref _data2[0]), (uint)Unsafe.SizeOf<Vector128<SByte>>());
}
public SimpleBinaryOpTest__ZipHigh_Vector128_SByte()
{
Succeeded = true;
for (var i = 0; i < Op1ElementCount; i++) { _data1[i] = TestLibrary.Generator.GetSByte(); }
Unsafe.CopyBlockUnaligned(ref Unsafe.As<Vector128<SByte>, byte>(ref _fld1), ref Unsafe.As<SByte, byte>(ref _data1[0]), (uint)Unsafe.SizeOf<Vector128<SByte>>());
for (var i = 0; i < Op2ElementCount; i++) { _data2[i] = TestLibrary.Generator.GetSByte(); }
Unsafe.CopyBlockUnaligned(ref Unsafe.As<Vector128<SByte>, byte>(ref _fld2), ref Unsafe.As<SByte, byte>(ref _data2[0]), (uint)Unsafe.SizeOf<Vector128<SByte>>());
for (var i = 0; i < Op1ElementCount; i++) { _data1[i] = TestLibrary.Generator.GetSByte(); }
for (var i = 0; i < Op2ElementCount; i++) { _data2[i] = TestLibrary.Generator.GetSByte(); }
_dataTable = new DataTable(_data1, _data2, new SByte[RetElementCount], LargestVectorSize);
}
public bool IsSupported => AdvSimd.Arm64.IsSupported;
public bool Succeeded { get; set; }
public void RunBasicScenario_UnsafeRead()
{
TestLibrary.TestFramework.BeginScenario(nameof(RunBasicScenario_UnsafeRead));
var result = AdvSimd.Arm64.ZipHigh(
Unsafe.Read<Vector128<SByte>>(_dataTable.inArray1Ptr),
Unsafe.Read<Vector128<SByte>>(_dataTable.inArray2Ptr)
);
Unsafe.Write(_dataTable.outArrayPtr, result);
ValidateResult(_dataTable.inArray1Ptr, _dataTable.inArray2Ptr, _dataTable.outArrayPtr);
}
public void RunBasicScenario_Load()
{
TestLibrary.TestFramework.BeginScenario(nameof(RunBasicScenario_Load));
var result = AdvSimd.Arm64.ZipHigh(
AdvSimd.LoadVector128((SByte*)(_dataTable.inArray1Ptr)),
AdvSimd.LoadVector128((SByte*)(_dataTable.inArray2Ptr))
);
Unsafe.Write(_dataTable.outArrayPtr, result);
ValidateResult(_dataTable.inArray1Ptr, _dataTable.inArray2Ptr, _dataTable.outArrayPtr);
}
public void RunReflectionScenario_UnsafeRead()
{
TestLibrary.TestFramework.BeginScenario(nameof(RunReflectionScenario_UnsafeRead));
var result = typeof(AdvSimd.Arm64).GetMethod(nameof(AdvSimd.Arm64.ZipHigh), new Type[] { typeof(Vector128<SByte>), typeof(Vector128<SByte>) })
.Invoke(null, new object[] {
Unsafe.Read<Vector128<SByte>>(_dataTable.inArray1Ptr),
Unsafe.Read<Vector128<SByte>>(_dataTable.inArray2Ptr)
});
Unsafe.Write(_dataTable.outArrayPtr, (Vector128<SByte>)(result));
ValidateResult(_dataTable.inArray1Ptr, _dataTable.inArray2Ptr, _dataTable.outArrayPtr);
}
public void RunReflectionScenario_Load()
{
TestLibrary.TestFramework.BeginScenario(nameof(RunReflectionScenario_Load));
var result = typeof(AdvSimd.Arm64).GetMethod(nameof(AdvSimd.Arm64.ZipHigh), new Type[] { typeof(Vector128<SByte>), typeof(Vector128<SByte>) })
.Invoke(null, new object[] {
AdvSimd.LoadVector128((SByte*)(_dataTable.inArray1Ptr)),
AdvSimd.LoadVector128((SByte*)(_dataTable.inArray2Ptr))
});
Unsafe.Write(_dataTable.outArrayPtr, (Vector128<SByte>)(result));
ValidateResult(_dataTable.inArray1Ptr, _dataTable.inArray2Ptr, _dataTable.outArrayPtr);
}
public void RunClsVarScenario()
{
TestLibrary.TestFramework.BeginScenario(nameof(RunClsVarScenario));
var result = AdvSimd.Arm64.ZipHigh(
_clsVar1,
_clsVar2
);
Unsafe.Write(_dataTable.outArrayPtr, result);
ValidateResult(_clsVar1, _clsVar2, _dataTable.outArrayPtr);
}
public void RunClsVarScenario_Load()
{
TestLibrary.TestFramework.BeginScenario(nameof(RunClsVarScenario_Load));
fixed (Vector128<SByte>* pClsVar1 = &_clsVar1)
fixed (Vector128<SByte>* pClsVar2 = &_clsVar2)
{
var result = AdvSimd.Arm64.ZipHigh(
AdvSimd.LoadVector128((SByte*)(pClsVar1)),
AdvSimd.LoadVector128((SByte*)(pClsVar2))
);
Unsafe.Write(_dataTable.outArrayPtr, result);
ValidateResult(_clsVar1, _clsVar2, _dataTable.outArrayPtr);
}
}
public void RunLclVarScenario_UnsafeRead()
{
TestLibrary.TestFramework.BeginScenario(nameof(RunLclVarScenario_UnsafeRead));
var op1 = Unsafe.Read<Vector128<SByte>>(_dataTable.inArray1Ptr);
var op2 = Unsafe.Read<Vector128<SByte>>(_dataTable.inArray2Ptr);
var result = AdvSimd.Arm64.ZipHigh(op1, op2);
Unsafe.Write(_dataTable.outArrayPtr, result);
ValidateResult(op1, op2, _dataTable.outArrayPtr);
}
public void RunLclVarScenario_Load()
{
TestLibrary.TestFramework.BeginScenario(nameof(RunLclVarScenario_Load));
var op1 = AdvSimd.LoadVector128((SByte*)(_dataTable.inArray1Ptr));
var op2 = AdvSimd.LoadVector128((SByte*)(_dataTable.inArray2Ptr));
var result = AdvSimd.Arm64.ZipHigh(op1, op2);
Unsafe.Write(_dataTable.outArrayPtr, result);
ValidateResult(op1, op2, _dataTable.outArrayPtr);
}
public void RunClassLclFldScenario()
{
TestLibrary.TestFramework.BeginScenario(nameof(RunClassLclFldScenario));
var test = new SimpleBinaryOpTest__ZipHigh_Vector128_SByte();
var result = AdvSimd.Arm64.ZipHigh(test._fld1, test._fld2);
Unsafe.Write(_dataTable.outArrayPtr, result);
ValidateResult(test._fld1, test._fld2, _dataTable.outArrayPtr);
}
public void RunClassLclFldScenario_Load()
{
TestLibrary.TestFramework.BeginScenario(nameof(RunClassLclFldScenario_Load));
var test = new SimpleBinaryOpTest__ZipHigh_Vector128_SByte();
fixed (Vector128<SByte>* pFld1 = &test._fld1)
fixed (Vector128<SByte>* pFld2 = &test._fld2)
{
var result = AdvSimd.Arm64.ZipHigh(
AdvSimd.LoadVector128((SByte*)(pFld1)),
AdvSimd.LoadVector128((SByte*)(pFld2))
);
Unsafe.Write(_dataTable.outArrayPtr, result);
ValidateResult(test._fld1, test._fld2, _dataTable.outArrayPtr);
}
}
public void RunClassFldScenario()
{
TestLibrary.TestFramework.BeginScenario(nameof(RunClassFldScenario));
var result = AdvSimd.Arm64.ZipHigh(_fld1, _fld2);
Unsafe.Write(_dataTable.outArrayPtr, result);
ValidateResult(_fld1, _fld2, _dataTable.outArrayPtr);
}
public void RunClassFldScenario_Load()
{
TestLibrary.TestFramework.BeginScenario(nameof(RunClassFldScenario_Load));
fixed (Vector128<SByte>* pFld1 = &_fld1)
fixed (Vector128<SByte>* pFld2 = &_fld2)
{
var result = AdvSimd.Arm64.ZipHigh(
AdvSimd.LoadVector128((SByte*)(pFld1)),
AdvSimd.LoadVector128((SByte*)(pFld2))
);
Unsafe.Write(_dataTable.outArrayPtr, result);
ValidateResult(_fld1, _fld2, _dataTable.outArrayPtr);
}
}
public void RunStructLclFldScenario()
{
TestLibrary.TestFramework.BeginScenario(nameof(RunStructLclFldScenario));
var test = TestStruct.Create();
var result = AdvSimd.Arm64.ZipHigh(test._fld1, test._fld2);
Unsafe.Write(_dataTable.outArrayPtr, result);
ValidateResult(test._fld1, test._fld2, _dataTable.outArrayPtr);
}
public void RunStructLclFldScenario_Load()
{
TestLibrary.TestFramework.BeginScenario(nameof(RunStructLclFldScenario_Load));
var test = TestStruct.Create();
var result = AdvSimd.Arm64.ZipHigh(
AdvSimd.LoadVector128((SByte*)(&test._fld1)),
AdvSimd.LoadVector128((SByte*)(&test._fld2))
);
Unsafe.Write(_dataTable.outArrayPtr, result);
ValidateResult(test._fld1, test._fld2, _dataTable.outArrayPtr);
}
public void RunStructFldScenario()
{
TestLibrary.TestFramework.BeginScenario(nameof(RunStructFldScenario));
var test = TestStruct.Create();
test.RunStructFldScenario(this);
}
public void RunStructFldScenario_Load()
{
TestLibrary.TestFramework.BeginScenario(nameof(RunStructFldScenario_Load));
var test = TestStruct.Create();
test.RunStructFldScenario_Load(this);
}
public void RunUnsupportedScenario()
{
TestLibrary.TestFramework.BeginScenario(nameof(RunUnsupportedScenario));
bool succeeded = false;
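            // On hardware without the required ISA, calling the intrinsic is expected to throw
            // PlatformNotSupportedException; anything else is a test failure.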
try
{
RunBasicScenario_UnsafeRead();
}
catch (PlatformNotSupportedException)
{
succeeded = true;
}
if (!succeeded)
{
Succeeded = false;
}
}
private void ValidateResult(Vector128<SByte> op1, Vector128<SByte> op2, void* result, [CallerMemberName] string method = "")
{
SByte[] inArray1 = new SByte[Op1ElementCount];
SByte[] inArray2 = new SByte[Op2ElementCount];
SByte[] outArray = new SByte[RetElementCount];
Unsafe.WriteUnaligned(ref Unsafe.As<SByte, byte>(ref inArray1[0]), op1);
Unsafe.WriteUnaligned(ref Unsafe.As<SByte, byte>(ref inArray2[0]), op2);
Unsafe.CopyBlockUnaligned(ref Unsafe.As<SByte, byte>(ref outArray[0]), ref Unsafe.AsRef<byte>(result), (uint)Unsafe.SizeOf<Vector128<SByte>>());
ValidateResult(inArray1, inArray2, outArray, method);
}
private void ValidateResult(void* op1, void* op2, void* result, [CallerMemberName] string method = "")
{
SByte[] inArray1 = new SByte[Op1ElementCount];
SByte[] inArray2 = new SByte[Op2ElementCount];
SByte[] outArray = new SByte[RetElementCount];
Unsafe.CopyBlockUnaligned(ref Unsafe.As<SByte, byte>(ref inArray1[0]), ref Unsafe.AsRef<byte>(op1), (uint)Unsafe.SizeOf<Vector128<SByte>>());
Unsafe.CopyBlockUnaligned(ref Unsafe.As<SByte, byte>(ref inArray2[0]), ref Unsafe.AsRef<byte>(op2), (uint)Unsafe.SizeOf<Vector128<SByte>>());
Unsafe.CopyBlockUnaligned(ref Unsafe.As<SByte, byte>(ref outArray[0]), ref Unsafe.AsRef<byte>(result), (uint)Unsafe.SizeOf<Vector128<SByte>>());
ValidateResult(inArray1, inArray2, outArray, method);
}
private void ValidateResult(SByte[] left, SByte[] right, SByte[] result, [CallerMemberName] string method = "")
{
bool succeeded = true;
int index = 0;
int half = RetElementCount / 2;
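            // ZipHigh interleaves the upper halves of the two inputs, so the expected output is
            // { left[half], right[half], left[half+1], right[half+1], ... }.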
for (var i = 0; i < RetElementCount; i+=2, index++)
{
if (result[i] != left[index+half] || result[i+1] != right[index+half])
{
succeeded = false;
break;
}
}
if (!succeeded)
{
TestLibrary.TestFramework.LogInformation($"{nameof(AdvSimd.Arm64)}.{nameof(AdvSimd.Arm64.ZipHigh)}<SByte>(Vector128<SByte>, Vector128<SByte>): {method} failed:");
TestLibrary.TestFramework.LogInformation($" left: ({string.Join(", ", left)})");
TestLibrary.TestFramework.LogInformation($" right: ({string.Join(", ", right)})");
TestLibrary.TestFramework.LogInformation($" result: ({string.Join(", ", result)})");
TestLibrary.TestFramework.LogInformation(string.Empty);
Succeeded = false;
}
}
}
}
| -1 |
dotnet/runtime | 66,248 | Fix issues related to JsonSerializerOptions mutation and caching. | Second attempt at merging https://github.com/dotnet/runtime/pull/65863. Original PR introduced test failures in netfx and was reverted in https://github.com/dotnet/runtime/pull/66235.
cc @jkotas | eiriktsarpalis | 2022-03-05T19:59:48Z | 2022-03-07T19:44:14Z | 44ec3c9474b3a93eb4f71791a43d8bb5c0b08a58 | 8b4eaf94140f488743d0c78caa3afec3b9c5d789 | Fix issues related to JsonSerializerOptions mutation and caching.. Second attempt at merging https://github.com/dotnet/runtime/pull/65863. Original PR introduced test failures in netfx and was reverted in https://github.com/dotnet/runtime/pull/66235.
cc @jkotas | ./src/mono/mono/metadata/w32event-unix.c | /**
* \file
* Runtime support for managed Event on Unix
*
* Author:
* Ludovic Henry ([email protected])
*
* Licensed under the MIT license. See LICENSE file in the project root for full license information.
*/
#include "w32event.h"
#include "w32error.h"
#include "mono/utils/mono-error-internals.h"
#include "mono/utils/mono-logger-internals.h"
#include "mono/metadata/handle.h"
#include "mono/metadata/object-internals.h"
#include "mono/metadata/w32handle.h"
#include "icall-decl.h"
#define MAX_PATH 260
typedef struct {
gboolean manual;
guint32 set_count;
} MonoW32HandleEvent;
static gpointer event_create (gboolean manual, gboolean initial);
static gint32 event_handle_signal (MonoW32Handle *handle_data)
{
MonoW32HandleEvent *event_handle;
event_handle = (MonoW32HandleEvent*) handle_data->specific;
mono_trace (G_LOG_LEVEL_DEBUG, MONO_TRACE_IO_LAYER_EVENT, "%s: signalling %s handle %p",
__func__, mono_w32handle_get_typename (handle_data->type), handle_data);
if (!event_handle->manual) {
event_handle->set_count = 1;
mono_w32handle_set_signal_state (handle_data, TRUE, FALSE);
} else {
mono_w32handle_set_signal_state (handle_data, TRUE, TRUE);
}
return MONO_W32HANDLE_WAIT_RET_SUCCESS_0;
}
static gboolean event_handle_own (MonoW32Handle *handle_data, gboolean *abandoned)
{
MonoW32HandleEvent *event_handle;
*abandoned = FALSE;
event_handle = (MonoW32HandleEvent*) handle_data->specific;
mono_trace (G_LOG_LEVEL_DEBUG, MONO_TRACE_IO_LAYER_EVENT, "%s: owning %s handle %p",
__func__, mono_w32handle_get_typename (handle_data->type), handle_data);
if (!event_handle->manual) {
g_assert (event_handle->set_count > 0);
event_handle->set_count --;
if (event_handle->set_count == 0)
mono_w32handle_set_signal_state (handle_data, FALSE, FALSE);
}
return TRUE;
}
static void event_details (MonoW32Handle *handle_data)
{
MonoW32HandleEvent *event = (MonoW32HandleEvent *)handle_data->specific;
g_print ("manual: %s, set_count: %d",
event->manual ? "TRUE" : "FALSE", event->set_count);
}
static const gchar* event_typename (void)
{
return "Event";
}
static gsize event_typesize (void)
{
return sizeof (MonoW32HandleEvent);
}
void
mono_w32event_init (void)
{
static const MonoW32HandleOps event_ops = {
NULL, /* close */
event_handle_signal, /* signal */
event_handle_own, /* own */
NULL, /* is_owned */
NULL, /* special_wait */
NULL, /* prewait */
event_details, /* details */
event_typename, /* typename */
event_typesize, /* typesize */
};
mono_w32handle_register_ops (MONO_W32TYPE_EVENT, &event_ops);
mono_w32handle_register_capabilities (MONO_W32TYPE_EVENT,
(MonoW32HandleCapability)(MONO_W32HANDLE_CAP_WAIT | MONO_W32HANDLE_CAP_SIGNAL));
}
gpointer
mono_w32event_create (gboolean manual, gboolean initial)
{
/* Need to blow away any old errors here, because code tests
* for ERROR_ALREADY_EXISTS on success (!) to see if an event
* was freshly created */
mono_w32error_set_last (ERROR_SUCCESS);
gpointer handle = event_create (manual, initial);
gint32 win32error = mono_w32error_get_last ();
g_assert ((win32error != ERROR_SUCCESS) == !handle);
return handle;
}
gboolean
mono_w32event_close (gpointer handle)
{
return mono_w32handle_close (handle);
}
static gpointer event_handle_create (MonoW32HandleEvent *event_handle, MonoW32Type type, gboolean manual, gboolean initial)
{
MonoW32Handle *handle_data;
gpointer handle;
event_handle->manual = manual;
event_handle->set_count = (initial && !manual) ? 1 : 0;
handle = mono_w32handle_new (type, event_handle);
if (handle == INVALID_HANDLE_VALUE) {
g_warning ("%s: error creating %s handle",
__func__, mono_w32handle_get_typename (type));
mono_w32error_set_last (ERROR_GEN_FAILURE);
return NULL;
}
if (!mono_w32handle_lookup_and_ref (handle, &handle_data))
g_error ("%s: unkown handle %p", __func__, handle);
if (handle_data->type != type)
g_error ("%s: unknown event handle %p", __func__, handle);
mono_w32handle_lock (handle_data);
if (initial)
mono_w32handle_set_signal_state (handle_data, TRUE, FALSE);
mono_w32handle_unlock (handle_data);
mono_trace (G_LOG_LEVEL_DEBUG, MONO_TRACE_IO_LAYER_EVENT, "%s: created %s handle %p",
__func__, mono_w32handle_get_typename (type), handle);
mono_w32handle_unref (handle_data);
return handle;
}
static gpointer event_create (gboolean manual, gboolean initial)
{
MonoW32HandleEvent event_handle;
mono_trace (G_LOG_LEVEL_DEBUG, MONO_TRACE_IO_LAYER_EVENT, "%s: creating %s handle",
__func__, mono_w32handle_get_typename (MONO_W32TYPE_EVENT));
return event_handle_create (&event_handle, MONO_W32TYPE_EVENT, manual, initial);
}
void
mono_w32event_set (gpointer handle)
{
MonoW32Handle *handle_data;
MonoW32HandleEvent *event_handle;
if (!mono_w32handle_lookup_and_ref (handle, &handle_data)) {
g_warning ("%s: unkown handle %p", __func__, handle);
mono_w32error_set_last (ERROR_INVALID_HANDLE);
return;
}
if (handle_data->type != MONO_W32TYPE_EVENT) {
g_warning ("%s: unkown event handle %p", __func__, handle);
mono_w32error_set_last (ERROR_INVALID_HANDLE);
mono_w32handle_unref (handle_data);
return;
}
event_handle = (MonoW32HandleEvent*) handle_data->specific;
mono_trace (G_LOG_LEVEL_DEBUG, MONO_TRACE_IO_LAYER_EVENT, "%s: setting %s handle %p",
__func__, mono_w32handle_get_typename (handle_data->type), handle);
mono_w32handle_lock (handle_data);
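	/* Auto-reset events release at most a single waiter (no broadcast),
	 * while manual-reset events stay signalled and wake all waiters. */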
if (!event_handle->manual) {
event_handle->set_count = 1;
mono_w32handle_set_signal_state (handle_data, TRUE, FALSE);
} else {
mono_w32handle_set_signal_state (handle_data, TRUE, TRUE);
}
mono_w32handle_unlock (handle_data);
mono_w32handle_unref (handle_data);
}
| /**
* \file
* Runtime support for managed Event on Unix
*
* Author:
* Ludovic Henry ([email protected])
*
* Licensed under the MIT license. See LICENSE file in the project root for full license information.
*/
#include "w32event.h"
#include "w32error.h"
#include "mono/utils/mono-error-internals.h"
#include "mono/utils/mono-logger-internals.h"
#include "mono/metadata/handle.h"
#include "mono/metadata/object-internals.h"
#include "mono/metadata/w32handle.h"
#include "icall-decl.h"
#define MAX_PATH 260
typedef struct {
gboolean manual;
guint32 set_count;
} MonoW32HandleEvent;
static gpointer event_create (gboolean manual, gboolean initial);
static gint32 event_handle_signal (MonoW32Handle *handle_data)
{
MonoW32HandleEvent *event_handle;
event_handle = (MonoW32HandleEvent*) handle_data->specific;
mono_trace (G_LOG_LEVEL_DEBUG, MONO_TRACE_IO_LAYER_EVENT, "%s: signalling %s handle %p",
__func__, mono_w32handle_get_typename (handle_data->type), handle_data);
if (!event_handle->manual) {
event_handle->set_count = 1;
mono_w32handle_set_signal_state (handle_data, TRUE, FALSE);
} else {
mono_w32handle_set_signal_state (handle_data, TRUE, TRUE);
}
return MONO_W32HANDLE_WAIT_RET_SUCCESS_0;
}
static gboolean event_handle_own (MonoW32Handle *handle_data, gboolean *abandoned)
{
MonoW32HandleEvent *event_handle;
*abandoned = FALSE;
event_handle = (MonoW32HandleEvent*) handle_data->specific;
mono_trace (G_LOG_LEVEL_DEBUG, MONO_TRACE_IO_LAYER_EVENT, "%s: owning %s handle %p",
__func__, mono_w32handle_get_typename (handle_data->type), handle_data);
if (!event_handle->manual) {
g_assert (event_handle->set_count > 0);
event_handle->set_count --;
if (event_handle->set_count == 0)
mono_w32handle_set_signal_state (handle_data, FALSE, FALSE);
}
return TRUE;
}
static void event_details (MonoW32Handle *handle_data)
{
MonoW32HandleEvent *event = (MonoW32HandleEvent *)handle_data->specific;
g_print ("manual: %s, set_count: %d",
event->manual ? "TRUE" : "FALSE", event->set_count);
}
static const gchar* event_typename (void)
{
return "Event";
}
static gsize event_typesize (void)
{
return sizeof (MonoW32HandleEvent);
}
void
mono_w32event_init (void)
{
static const MonoW32HandleOps event_ops = {
NULL, /* close */
event_handle_signal, /* signal */
event_handle_own, /* own */
NULL, /* is_owned */
NULL, /* special_wait */
NULL, /* prewait */
event_details, /* details */
event_typename, /* typename */
event_typesize, /* typesize */
};
mono_w32handle_register_ops (MONO_W32TYPE_EVENT, &event_ops);
mono_w32handle_register_capabilities (MONO_W32TYPE_EVENT,
(MonoW32HandleCapability)(MONO_W32HANDLE_CAP_WAIT | MONO_W32HANDLE_CAP_SIGNAL));
}
gpointer
mono_w32event_create (gboolean manual, gboolean initial)
{
/* Need to blow away any old errors here, because code tests
* for ERROR_ALREADY_EXISTS on success (!) to see if an event
* was freshly created */
mono_w32error_set_last (ERROR_SUCCESS);
gpointer handle = event_create (manual, initial);
gint32 win32error = mono_w32error_get_last ();
g_assert ((win32error != ERROR_SUCCESS) == !handle);
return handle;
}
gboolean
mono_w32event_close (gpointer handle)
{
return mono_w32handle_close (handle);
}
static gpointer event_handle_create (MonoW32HandleEvent *event_handle, MonoW32Type type, gboolean manual, gboolean initial)
{
MonoW32Handle *handle_data;
gpointer handle;
event_handle->manual = manual;
event_handle->set_count = (initial && !manual) ? 1 : 0;
handle = mono_w32handle_new (type, event_handle);
if (handle == INVALID_HANDLE_VALUE) {
g_warning ("%s: error creating %s handle",
__func__, mono_w32handle_get_typename (type));
mono_w32error_set_last (ERROR_GEN_FAILURE);
return NULL;
}
if (!mono_w32handle_lookup_and_ref (handle, &handle_data))
g_error ("%s: unkown handle %p", __func__, handle);
if (handle_data->type != type)
g_error ("%s: unknown event handle %p", __func__, handle);
mono_w32handle_lock (handle_data);
if (initial)
mono_w32handle_set_signal_state (handle_data, TRUE, FALSE);
mono_w32handle_unlock (handle_data);
mono_trace (G_LOG_LEVEL_DEBUG, MONO_TRACE_IO_LAYER_EVENT, "%s: created %s handle %p",
__func__, mono_w32handle_get_typename (type), handle);
mono_w32handle_unref (handle_data);
return handle;
}
static gpointer event_create (gboolean manual, gboolean initial)
{
MonoW32HandleEvent event_handle;
mono_trace (G_LOG_LEVEL_DEBUG, MONO_TRACE_IO_LAYER_EVENT, "%s: creating %s handle",
__func__, mono_w32handle_get_typename (MONO_W32TYPE_EVENT));
return event_handle_create (&event_handle, MONO_W32TYPE_EVENT, manual, initial);
}
void
mono_w32event_set (gpointer handle)
{
MonoW32Handle *handle_data;
MonoW32HandleEvent *event_handle;
if (!mono_w32handle_lookup_and_ref (handle, &handle_data)) {
g_warning ("%s: unkown handle %p", __func__, handle);
mono_w32error_set_last (ERROR_INVALID_HANDLE);
return;
}
if (handle_data->type != MONO_W32TYPE_EVENT) {
g_warning ("%s: unkown event handle %p", __func__, handle);
mono_w32error_set_last (ERROR_INVALID_HANDLE);
mono_w32handle_unref (handle_data);
return;
}
event_handle = (MonoW32HandleEvent*) handle_data->specific;
mono_trace (G_LOG_LEVEL_DEBUG, MONO_TRACE_IO_LAYER_EVENT, "%s: setting %s handle %p",
__func__, mono_w32handle_get_typename (handle_data->type), handle);
mono_w32handle_lock (handle_data);
if (!event_handle->manual) {
event_handle->set_count = 1;
mono_w32handle_set_signal_state (handle_data, TRUE, FALSE);
} else {
mono_w32handle_set_signal_state (handle_data, TRUE, TRUE);
}
mono_w32handle_unlock (handle_data);
mono_w32handle_unref (handle_data);
}
| -1 |
dotnet/runtime | 66,248 | Fix issues related to JsonSerializerOptions mutation and caching. | Second attempt at merging https://github.com/dotnet/runtime/pull/65863. Original PR introduced test failures in netfx and was reverted in https://github.com/dotnet/runtime/pull/66235.
cc @jkotas | eiriktsarpalis | 2022-03-05T19:59:48Z | 2022-03-07T19:44:14Z | 44ec3c9474b3a93eb4f71791a43d8bb5c0b08a58 | 8b4eaf94140f488743d0c78caa3afec3b9c5d789 | Fix issues related to JsonSerializerOptions mutation and caching.. Second attempt at merging https://github.com/dotnet/runtime/pull/65863. Original PR introduced test failures in netfx and was reverted in https://github.com/dotnet/runtime/pull/66235.
cc @jkotas | ./src/mono/mono/tests/abort-tests.cs | using System;
using System.Threading;
public class Tests {
public static void Test1 ()
{
bool called_finally = false;
bool failed_abort = false;
bool finished = false;
Thread thr = new Thread (() => {
try {
try {
Thread.CurrentThread.Abort ();
} finally {
called_finally = true;
Thread.CurrentThread.Abort ();
failed_abort = true;
}
} catch (ThreadAbortException) {
Thread.ResetAbort ();
}
finished = true;
});
thr.Start ();
thr.Join ();
if (!called_finally)
Environment.Exit (1);
if (failed_abort)
Environment.Exit (2);
if (!finished)
Environment.Exit (3);
}
public static void Main ()
{
Test1 ();
Console.WriteLine ("done, all things good");
}
}
| using System;
using System.Threading;
public class Tests {
public static void Test1 ()
{
bool called_finally = false;
bool failed_abort = false;
bool finished = false;
Thread thr = new Thread (() => {
try {
try {
Thread.CurrentThread.Abort ();
} finally {
called_finally = true;
Thread.CurrentThread.Abort ();
failed_abort = true;
}
} catch (ThreadAbortException) {
Thread.ResetAbort ();
}
finished = true;
});
thr.Start ();
thr.Join ();
if (!called_finally)
Environment.Exit (1);
if (failed_abort)
Environment.Exit (2);
if (!finished)
Environment.Exit (3);
}
public static void Main ()
{
Test1 ();
Console.WriteLine ("done, all things good");
}
}
| -1 |
dotnet/runtime | 66,248 | Fix issues related to JsonSerializerOptions mutation and caching. | Second attempt at merging https://github.com/dotnet/runtime/pull/65863. Original PR introduced test failures in netfx and was reverted in https://github.com/dotnet/runtime/pull/66235.
cc @jkotas | eiriktsarpalis | 2022-03-05T19:59:48Z | 2022-03-07T19:44:14Z | 44ec3c9474b3a93eb4f71791a43d8bb5c0b08a58 | 8b4eaf94140f488743d0c78caa3afec3b9c5d789 | Fix issues related to JsonSerializerOptions mutation and caching.. Second attempt at merging https://github.com/dotnet/runtime/pull/65863. Original PR introduced test failures in netfx and was reverted in https://github.com/dotnet/runtime/pull/66235.
cc @jkotas | ./src/tests/JIT/Regression/CLR-x86-JIT/V1-M12-Beta2/b51817/b51817.csproj | <Project Sdk="Microsoft.NET.Sdk">
<PropertyGroup>
<OutputType>Exe</OutputType>
<CLRTestPriority>1</CLRTestPriority>
</PropertyGroup>
<PropertyGroup>
<DebugType>PdbOnly</DebugType>
</PropertyGroup>
<ItemGroup>
<Compile Include="$(MSBuildProjectName).cs" />
</ItemGroup>
</Project>
| <Project Sdk="Microsoft.NET.Sdk">
<PropertyGroup>
<OutputType>Exe</OutputType>
<CLRTestPriority>1</CLRTestPriority>
</PropertyGroup>
<PropertyGroup>
<DebugType>PdbOnly</DebugType>
</PropertyGroup>
<ItemGroup>
<Compile Include="$(MSBuildProjectName).cs" />
</ItemGroup>
</Project>
| -1 |
dotnet/runtime | 66,248 | Fix issues related to JsonSerializerOptions mutation and caching. | Second attempt at merging https://github.com/dotnet/runtime/pull/65863. Original PR introduced test failures in netfx and was reverted in https://github.com/dotnet/runtime/pull/66235.
cc @jkotas | eiriktsarpalis | 2022-03-05T19:59:48Z | 2022-03-07T19:44:14Z | 44ec3c9474b3a93eb4f71791a43d8bb5c0b08a58 | 8b4eaf94140f488743d0c78caa3afec3b9c5d789 | Fix issues related to JsonSerializerOptions mutation and caching.. Second attempt at merging https://github.com/dotnet/runtime/pull/65863. Original PR introduced test failures in netfx and was reverted in https://github.com/dotnet/runtime/pull/66235.
cc @jkotas | ./src/tests/Interop/COM/ServerContracts/ServerGuids.cs | // Licensed to the .NET Foundation under one or more agreements.
// The .NET Foundation licenses this file to you under the MIT license.
namespace Server.Contract
{
/// <summary>
/// Guids for all COM Servers
/// </summary>
internal sealed class Guids
{
public const string NumericTesting = "53169A33-E85D-4E3C-B668-24E438D0929B";
public const string ArrayTesting = "B99ABE6A-DFF6-440F-BFB6-55179B8FE18E";
public const string StringTesting = "C73C83E8-51A2-47F8-9B5C-4284458E47A6";
public const string ErrorMarshalTesting = "71CF5C45-106C-4B32-B418-43A463C6041F";
public const string DispatchTesting = "0F8ACD0C-ECE0-4F2A-BD1B-6BFCA93A0726";
public const string EventTesting = "4DBD9B61-E372-499F-84DE-EFC70AA8A009";
public const string AggregationTesting = "4CEFE36D-F377-4B6E-8C34-819A8BB9CB04";
public const string ColorTesting = "C222F472-DA5A-4FC6-9321-92F4F7053A65";
public const string LicenseTesting = "66DB7882-E2B0-471D-92C7-B2B52A0EA535";
public const string DefaultInterfaceTesting = "FAEF42AE-C1A4-419F-A912-B768AC2679EA";
public const string ConsumeNETServerTesting = "DE4ACF53-5957-4D31-8BE2-EA6C80683246";
public const string InspectableTesting = "CE137261-6F19-44F5-A449-EF963B3F987E";
public const string TrackMyLifetimeTesting = "4F54231D-9E11-4C0B-8E0B-2EBD8B0E5811";
}
}
| // Licensed to the .NET Foundation under one or more agreements.
// The .NET Foundation licenses this file to you under the MIT license.
namespace Server.Contract
{
/// <summary>
/// Guids for all COM Servers
/// </summary>
internal sealed class Guids
{
public const string NumericTesting = "53169A33-E85D-4E3C-B668-24E438D0929B";
public const string ArrayTesting = "B99ABE6A-DFF6-440F-BFB6-55179B8FE18E";
public const string StringTesting = "C73C83E8-51A2-47F8-9B5C-4284458E47A6";
public const string ErrorMarshalTesting = "71CF5C45-106C-4B32-B418-43A463C6041F";
public const string DispatchTesting = "0F8ACD0C-ECE0-4F2A-BD1B-6BFCA93A0726";
public const string EventTesting = "4DBD9B61-E372-499F-84DE-EFC70AA8A009";
public const string AggregationTesting = "4CEFE36D-F377-4B6E-8C34-819A8BB9CB04";
public const string ColorTesting = "C222F472-DA5A-4FC6-9321-92F4F7053A65";
public const string LicenseTesting = "66DB7882-E2B0-471D-92C7-B2B52A0EA535";
public const string DefaultInterfaceTesting = "FAEF42AE-C1A4-419F-A912-B768AC2679EA";
public const string ConsumeNETServerTesting = "DE4ACF53-5957-4D31-8BE2-EA6C80683246";
public const string InspectableTesting = "CE137261-6F19-44F5-A449-EF963B3F987E";
public const string TrackMyLifetimeTesting = "4F54231D-9E11-4C0B-8E0B-2EBD8B0E5811";
}
}
| -1 |
dotnet/runtime | 66,248 | Fix issues related to JsonSerializerOptions mutation and caching. | Second attempt at merging https://github.com/dotnet/runtime/pull/65863. Original PR introduced test failures in netfx and was reverted in https://github.com/dotnet/runtime/pull/66235.
cc @jkotas | eiriktsarpalis | 2022-03-05T19:59:48Z | 2022-03-07T19:44:14Z | 44ec3c9474b3a93eb4f71791a43d8bb5c0b08a58 | 8b4eaf94140f488743d0c78caa3afec3b9c5d789 | Fix issues related to JsonSerializerOptions mutation and caching.. Second attempt at merging https://github.com/dotnet/runtime/pull/65863. Original PR introduced test failures in netfx and was reverted in https://github.com/dotnet/runtime/pull/66235.
cc @jkotas | ./src/libraries/System.Runtime.InteropServices/tests/LibraryImportGenerator.Tests/EnumTests.cs | // Licensed to the .NET Foundation under one or more agreements.
// The .NET Foundation licenses this file to you under the MIT license.
using System.Collections.Generic;
using System.Runtime.InteropServices;
using Xunit;
namespace LibraryImportGenerator.IntegrationTests
{
partial class NativeExportsNE
{
public partial class IntEnum
{
[GeneratedDllImport(NativeExportsNE_Binary, EntryPoint = "subtract_return_int")]
public static partial EnumTests.IntEnum Subtract_Return(EnumTests.IntEnum a, EnumTests.IntEnum b);
[GeneratedDllImport(NativeExportsNE_Binary, EntryPoint = "subtract_out_int")]
public static partial void Subtract_Out(EnumTests.IntEnum a, EnumTests.IntEnum b, out EnumTests.IntEnum c);
[GeneratedDllImport(NativeExportsNE_Binary, EntryPoint = "subtract_ref_int")]
public static partial void Subtract_Ref(EnumTests.IntEnum a, ref EnumTests.IntEnum b);
[GeneratedDllImport(NativeExportsNE_Binary, EntryPoint = "subtract_ref_int")]
public static partial void Subtract_In(EnumTests.IntEnum a, in EnumTests.IntEnum b);
}
public partial class ByteEnum
{
[GeneratedDllImport(NativeExportsNE_Binary, EntryPoint = "subtract_return_byte")]
public static partial EnumTests.ByteEnum Subtract_Return(EnumTests.ByteEnum a, EnumTests.ByteEnum b);
[GeneratedDllImport(NativeExportsNE_Binary, EntryPoint = "subtract_out_byte")]
public static partial void Subtract_Out(EnumTests.ByteEnum a, EnumTests.ByteEnum b, out EnumTests.ByteEnum c);
[GeneratedDllImport(NativeExportsNE_Binary, EntryPoint = "subtract_ref_byte")]
public static partial void Subtract_Ref(EnumTests.ByteEnum a, ref EnumTests.ByteEnum b);
[GeneratedDllImport(NativeExportsNE_Binary, EntryPoint = "subtract_ref_byte")]
public static partial void Subtract_In(EnumTests.ByteEnum a, in EnumTests.ByteEnum b);
}
}
public class EnumTests
{
internal enum IntEnum
{
Zero,
One,
Two,
Three,
Max = int.MaxValue
}
internal enum ByteEnum : byte
{
Zero,
One,
Two,
Three,
Max = byte.MaxValue
}
[Fact]
public void EnumByValue()
{
{
IntEnum ret = NativeExportsNE.IntEnum.Subtract_Return(IntEnum.Max, IntEnum.Zero);
Assert.Equal(IntEnum.Max, ret);
}
{
ByteEnum ret = NativeExportsNE.ByteEnum.Subtract_Return(ByteEnum.Max, ByteEnum.Zero);
Assert.Equal(ByteEnum.Max, ret);
}
}
[Fact]
public void EnumByRef()
{
{
IntEnum a = IntEnum.Three;
IntEnum b = IntEnum.Two;
IntEnum expected = IntEnum.One;
IntEnum ret;
NativeExportsNE.IntEnum.Subtract_Out(a, b, out ret);
Assert.Equal(expected, ret);
IntEnum refValue = b;
NativeExportsNE.IntEnum.Subtract_In(a, in refValue);
Assert.Equal(expected, refValue); // Value is updated even when passed with in keyword (matches built-in system)
refValue = b;
NativeExportsNE.IntEnum.Subtract_Ref(a, ref refValue);
Assert.Equal(expected, refValue);
}
{
ByteEnum a = ByteEnum.Three;
ByteEnum b = ByteEnum.Two;
ByteEnum expected = ByteEnum.One;
ByteEnum ret;
NativeExportsNE.ByteEnum.Subtract_Out(a, b, out ret);
Assert.Equal(expected, ret);
ByteEnum refValue = b;
NativeExportsNE.ByteEnum.Subtract_In(a, in refValue);
Assert.Equal(expected, refValue); // Value is updated even when passed with in keyword (matches built-in system)
refValue = b;
NativeExportsNE.ByteEnum.Subtract_Ref(a, ref refValue);
Assert.Equal(expected, refValue);
}
}
}
}
| // Licensed to the .NET Foundation under one or more agreements.
// The .NET Foundation licenses this file to you under the MIT license.
using System.Collections.Generic;
using System.Runtime.InteropServices;
using Xunit;
namespace LibraryImportGenerator.IntegrationTests
{
partial class NativeExportsNE
{
public partial class IntEnum
{
[GeneratedDllImport(NativeExportsNE_Binary, EntryPoint = "subtract_return_int")]
public static partial EnumTests.IntEnum Subtract_Return(EnumTests.IntEnum a, EnumTests.IntEnum b);
[GeneratedDllImport(NativeExportsNE_Binary, EntryPoint = "subtract_out_int")]
public static partial void Subtract_Out(EnumTests.IntEnum a, EnumTests.IntEnum b, out EnumTests.IntEnum c);
[GeneratedDllImport(NativeExportsNE_Binary, EntryPoint = "subtract_ref_int")]
public static partial void Subtract_Ref(EnumTests.IntEnum a, ref EnumTests.IntEnum b);
[GeneratedDllImport(NativeExportsNE_Binary, EntryPoint = "subtract_ref_int")]
public static partial void Subtract_In(EnumTests.IntEnum a, in EnumTests.IntEnum b);
}
public partial class ByteEnum
{
[GeneratedDllImport(NativeExportsNE_Binary, EntryPoint = "subtract_return_byte")]
public static partial EnumTests.ByteEnum Subtract_Return(EnumTests.ByteEnum a, EnumTests.ByteEnum b);
[GeneratedDllImport(NativeExportsNE_Binary, EntryPoint = "subtract_out_byte")]
public static partial void Subtract_Out(EnumTests.ByteEnum a, EnumTests.ByteEnum b, out EnumTests.ByteEnum c);
[GeneratedDllImport(NativeExportsNE_Binary, EntryPoint = "subtract_ref_byte")]
public static partial void Subtract_Ref(EnumTests.ByteEnum a, ref EnumTests.ByteEnum b);
[GeneratedDllImport(NativeExportsNE_Binary, EntryPoint = "subtract_ref_byte")]
public static partial void Subtract_In(EnumTests.ByteEnum a, in EnumTests.ByteEnum b);
}
}
public class EnumTests
{
internal enum IntEnum
{
Zero,
One,
Two,
Three,
Max = int.MaxValue
}
internal enum ByteEnum : byte
{
Zero,
One,
Two,
Three,
Max = byte.MaxValue
}
[Fact]
public void EnumByValue()
{
{
IntEnum ret = NativeExportsNE.IntEnum.Subtract_Return(IntEnum.Max, IntEnum.Zero);
Assert.Equal(IntEnum.Max, ret);
}
{
ByteEnum ret = NativeExportsNE.ByteEnum.Subtract_Return(ByteEnum.Max, ByteEnum.Zero);
Assert.Equal(ByteEnum.Max, ret);
}
}
[Fact]
public void EnumByRef()
{
{
IntEnum a = IntEnum.Three;
IntEnum b = IntEnum.Two;
IntEnum expected = IntEnum.One;
IntEnum ret;
NativeExportsNE.IntEnum.Subtract_Out(a, b, out ret);
Assert.Equal(expected, ret);
IntEnum refValue = b;
NativeExportsNE.IntEnum.Subtract_In(a, in refValue);
Assert.Equal(expected, refValue); // Value is updated even when passed with in keyword (matches built-in system)
refValue = b;
NativeExportsNE.IntEnum.Subtract_Ref(a, ref refValue);
Assert.Equal(expected, refValue);
}
{
ByteEnum a = ByteEnum.Three;
ByteEnum b = ByteEnum.Two;
ByteEnum expected = ByteEnum.One;
ByteEnum ret;
NativeExportsNE.ByteEnum.Subtract_Out(a, b, out ret);
Assert.Equal(expected, ret);
ByteEnum refValue = b;
NativeExportsNE.ByteEnum.Subtract_In(a, in refValue);
Assert.Equal(expected, refValue); // Value is updated even when passed with in keyword (matches built-in system)
refValue = b;
NativeExportsNE.ByteEnum.Subtract_Ref(a, ref refValue);
Assert.Equal(expected, refValue);
}
}
}
}
| -1 |
dotnet/runtime | 66,248 | Fix issues related to JsonSerializerOptions mutation and caching. | Second attempt at merging https://github.com/dotnet/runtime/pull/65863. Original PR introduced test failures in netfx and was reverted in https://github.com/dotnet/runtime/pull/66235.
cc @jkotas | eiriktsarpalis | 2022-03-05T19:59:48Z | 2022-03-07T19:44:14Z | 44ec3c9474b3a93eb4f71791a43d8bb5c0b08a58 | 8b4eaf94140f488743d0c78caa3afec3b9c5d789 | Fix issues related to JsonSerializerOptions mutation and caching.. Second attempt at merging https://github.com/dotnet/runtime/pull/65863. Original PR introduced test failures in netfx and was reverted in https://github.com/dotnet/runtime/pull/66235.
cc @jkotas | ./src/libraries/Common/src/System/Security/Cryptography/Asn1Reader/System.Security.Cryptography.Asn1Reader.Shared.projitems | <Project>
<PropertyGroup>
<MSBuildAllProjects>$(MSBuildAllProjects);$(MSBuildThisFileFullPath)</MSBuildAllProjects>
<HasSharedItems>true</HasSharedItems>
<SharedGUID>4aaf81e6-6cdc-44fa-adfd-d7b47b9c998f</SharedGUID>
</PropertyGroup>
<ItemDefinitionGroup>
<Compile>
<Visible>true</Visible>
</Compile>
</ItemDefinitionGroup>
<ItemGroup>
<Compile Include="$(MSBuildThisFileDirectory)AsnValueReader.cs">
<Link>Common\System\Security\Cryptography\Asn1Reader\AsnValueReader.cs</Link>
</Compile>
</ItemGroup>
</Project>
| <Project>
<PropertyGroup>
<MSBuildAllProjects>$(MSBuildAllProjects);$(MSBuildThisFileFullPath)</MSBuildAllProjects>
<HasSharedItems>true</HasSharedItems>
<SharedGUID>4aaf81e6-6cdc-44fa-adfd-d7b47b9c998f</SharedGUID>
</PropertyGroup>
<ItemDefinitionGroup>
<Compile>
<Visible>true</Visible>
</Compile>
</ItemDefinitionGroup>
<ItemGroup>
<Compile Include="$(MSBuildThisFileDirectory)AsnValueReader.cs">
<Link>Common\System\Security\Cryptography\Asn1Reader\AsnValueReader.cs</Link>
</Compile>
</ItemGroup>
</Project>
| -1 |
dotnet/runtime | 66,248 | Fix issues related to JsonSerializerOptions mutation and caching. | Second attempt at merging https://github.com/dotnet/runtime/pull/65863. Original PR introduced test failures in netfx and was reverted in https://github.com/dotnet/runtime/pull/66235.
cc @jkotas | eiriktsarpalis | 2022-03-05T19:59:48Z | 2022-03-07T19:44:14Z | 44ec3c9474b3a93eb4f71791a43d8bb5c0b08a58 | 8b4eaf94140f488743d0c78caa3afec3b9c5d789 | Fix issues related to JsonSerializerOptions mutation and caching.. Second attempt at merging https://github.com/dotnet/runtime/pull/65863. Original PR introduced test failures in netfx and was reverted in https://github.com/dotnet/runtime/pull/66235.
cc @jkotas | ./src/tests/JIT/Methodical/NaN/r4NaNrem_cs_do.csproj | <Project Sdk="Microsoft.NET.Sdk">
<PropertyGroup>
<OutputType>Exe</OutputType>
<CLRTestPriority>1</CLRTestPriority>
</PropertyGroup>
<PropertyGroup>
<DebugType>Full</DebugType>
<Optimize>True</Optimize>
</PropertyGroup>
<ItemGroup>
<Compile Include="r4NaNrem.cs" />
</ItemGroup>
</Project>
| <Project Sdk="Microsoft.NET.Sdk">
<PropertyGroup>
<OutputType>Exe</OutputType>
<CLRTestPriority>1</CLRTestPriority>
</PropertyGroup>
<PropertyGroup>
<DebugType>Full</DebugType>
<Optimize>True</Optimize>
</PropertyGroup>
<ItemGroup>
<Compile Include="r4NaNrem.cs" />
</ItemGroup>
</Project>
| -1 |
dotnet/runtime | 66,248 | Fix issues related to JsonSerializerOptions mutation and caching. | Second attempt at merging https://github.com/dotnet/runtime/pull/65863. Original PR introduced test failures in netfx and was reverted in https://github.com/dotnet/runtime/pull/66235.
cc @jkotas | eiriktsarpalis | 2022-03-05T19:59:48Z | 2022-03-07T19:44:14Z | 44ec3c9474b3a93eb4f71791a43d8bb5c0b08a58 | 8b4eaf94140f488743d0c78caa3afec3b9c5d789 | Fix issues related to JsonSerializerOptions mutation and caching.. Second attempt at merging https://github.com/dotnet/runtime/pull/65863. Original PR introduced test failures in netfx and was reverted in https://github.com/dotnet/runtime/pull/66235.
cc @jkotas | ./src/mono/mono/tests/classinit.cs | using System;
class Foo {
static public int i = 0;
}
class Bar {
static public int j;
static Bar () {
j = Foo.i;
}
}
class Bug {
static public int Main () {
Foo.i = 5;
if (Bar.j != 5)
return 1;
return 0;
}
}
| using System;
class Foo {
static public int i = 0;
}
class Bar {
static public int j;
static Bar () {
j = Foo.i;
}
}
class Bug {
static public int Main () {
Foo.i = 5;
if (Bar.j != 5)
return 1;
return 0;
}
}
| -1 |
dotnet/runtime | 66,248 | Fix issues related to JsonSerializerOptions mutation and caching. | Second attempt at merging https://github.com/dotnet/runtime/pull/65863. Original PR introduced test failures in netfx and was reverted in https://github.com/dotnet/runtime/pull/66235.
cc @jkotas | eiriktsarpalis | 2022-03-05T19:59:48Z | 2022-03-07T19:44:14Z | 44ec3c9474b3a93eb4f71791a43d8bb5c0b08a58 | 8b4eaf94140f488743d0c78caa3afec3b9c5d789 | Fix issues related to JsonSerializerOptions mutation and caching.. Second attempt at merging https://github.com/dotnet/runtime/pull/65863. Original PR introduced test failures in netfx and was reverted in https://github.com/dotnet/runtime/pull/66235.
cc @jkotas | ./src/tests/JIT/CodeGenBringUpTests/mul2_r.csproj | <Project Sdk="Microsoft.NET.Sdk">
<PropertyGroup>
<OutputType>Exe</OutputType>
<CLRTestPriority>1</CLRTestPriority>
</PropertyGroup>
<PropertyGroup>
<DebugType>PdbOnly</DebugType>
<Optimize>False</Optimize>
</PropertyGroup>
<ItemGroup>
<Compile Include="mul2.cs" />
</ItemGroup>
</Project>
| <Project Sdk="Microsoft.NET.Sdk">
<PropertyGroup>
<OutputType>Exe</OutputType>
<CLRTestPriority>1</CLRTestPriority>
</PropertyGroup>
<PropertyGroup>
<DebugType>PdbOnly</DebugType>
<Optimize>False</Optimize>
</PropertyGroup>
<ItemGroup>
<Compile Include="mul2.cs" />
</ItemGroup>
</Project>
| -1 |
dotnet/runtime | 66,245 | JIT: Optimize movzx after setcc | Clear target reg via `xor reg, reg` instead of relying on zero-extend after SETCC which is heavier and slower
```csharp
bool Test(int x) => x == 42;
```
```diff
; Method Test(int):bool:this
G_M55728_IG01: ;; offset=0000H
G_M55728_IG02: ;; offset=0000H
+ 33C0 xor eax, eax
83FA2A cmp edx, 42
0F94C0 sete al
- 0FB6C0 movzx rax, al
G_M55728_IG03: ;; offset=0009H
C3 ret
-; Total bytes of code: 10
+; Total bytes of code: 9
```
Nice diffs:
```
benchmarks.run.Linux.x64.checked.mch:
Total bytes of delta: -5230 (-0.03 % of base)
coreclr_tests.pmi.Linux.x64.checked.mch:
Total bytes of delta: -210860 (-0.16 % of base)
libraries.crossgen2.Linux.x64.checked.mch:
Total bytes of delta: -4420 (-0.06 % of base)
libraries.pmi.Linux.x64.checked.mch:
Total bytes of delta: -25312 (-0.05 % of base)
libraries_tests.pmi.Linux.x64.checked.mch:
Total bytes of delta: -49314 (-0.04 % of base)
``` | EgorBo | 2022-03-05T17:18:25Z | 2022-03-07T23:22:14Z | 440dfe4a7beecd7755767aa247f47af00b119383 | 5635905f134a3329a15112bd4975acef3f661eb2 | JIT: Optimize movzx after setcc. Clear target reg via `xor reg, reg` instead of relying on zero-extend after SETCC which is heavier and slower
```csharp
bool Test(int x) => x == 42;
```
```diff
; Method Test(int):bool:this
G_M55728_IG01: ;; offset=0000H
G_M55728_IG02: ;; offset=0000H
+ 33C0 xor eax, eax
83FA2A cmp edx, 42
0F94C0 sete al
- 0FB6C0 movzx rax, al
G_M55728_IG03: ;; offset=0009H
C3 ret
-; Total bytes of code: 10
+; Total bytes of code: 9
```
Nice diffs:
```
benchmarks.run.Linux.x64.checked.mch:
Total bytes of delta: -5230 (-0.03 % of base)
coreclr_tests.pmi.Linux.x64.checked.mch:
Total bytes of delta: -210860 (-0.16 % of base)
libraries.crossgen2.Linux.x64.checked.mch:
Total bytes of delta: -4420 (-0.06 % of base)
libraries.pmi.Linux.x64.checked.mch:
Total bytes of delta: -25312 (-0.05 % of base)
libraries_tests.pmi.Linux.x64.checked.mch:
Total bytes of delta: -49314 (-0.04 % of base)
``` | ./src/coreclr/jit/codegenxarch.cpp | // Licensed to the .NET Foundation under one or more agreements.
// The .NET Foundation licenses this file to you under the MIT license.
/*XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XX XX
XX Amd64/x86 Code Generator XX
XX XX
XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
*/
#include "jitpch.h"
#ifdef _MSC_VER
#pragma hdrstop
#pragma warning(disable : 4310) // cast truncates constant value - happens for (int8_t)0xb1
#endif
#ifdef TARGET_XARCH
#include "emit.h"
#include "codegen.h"
#include "lower.h"
#include "gcinfo.h"
#include "gcinfoencoder.h"
#include "patchpointinfo.h"
//---------------------------------------------------------------------
// genSetGSSecurityCookie: Set the "GS" security cookie in the prolog.
//
// Arguments:
// initReg - register to use as a scratch register
// pInitRegZeroed - OUT parameter. *pInitRegZeroed is set to 'false' if and only if
// this call sets 'initReg' to a non-zero value.
//
// Return Value:
// None
//
void CodeGen::genSetGSSecurityCookie(regNumber initReg, bool* pInitRegZeroed)
{
assert(compiler->compGeneratingProlog);
if (!compiler->getNeedsGSSecurityCookie())
{
return;
}
if (compiler->opts.IsOSR() && compiler->info.compPatchpointInfo->HasSecurityCookie())
{
// Security cookie is on original frame and was initialized there.
return;
}
if (compiler->gsGlobalSecurityCookieAddr == nullptr)
{
noway_assert(compiler->gsGlobalSecurityCookieVal != 0);
#ifdef TARGET_AMD64
if ((size_t)(int)compiler->gsGlobalSecurityCookieVal != compiler->gsGlobalSecurityCookieVal)
{
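            // The cookie value does not fit in a sign-extended 32-bit immediate, so load it into
            // a register first and then store it to the frame slot.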
// initReg = #GlobalSecurityCookieVal64; [frame.GSSecurityCookie] = initReg
instGen_Set_Reg_To_Imm(EA_PTRSIZE, initReg, compiler->gsGlobalSecurityCookieVal);
GetEmitter()->emitIns_S_R(INS_mov, EA_PTRSIZE, initReg, compiler->lvaGSSecurityCookie, 0);
*pInitRegZeroed = false;
}
else
#endif
{
// mov dword ptr [frame.GSSecurityCookie], #GlobalSecurityCookieVal
GetEmitter()->emitIns_S_I(INS_mov, EA_PTRSIZE, compiler->lvaGSSecurityCookie, 0,
(int)compiler->gsGlobalSecurityCookieVal);
}
}
else
{
// Always use EAX on x86 and x64
// On x64, if we're not moving into RAX, and the address isn't RIP relative, we can't encode it.
// mov eax, dword ptr [compiler->gsGlobalSecurityCookieAddr]
// mov dword ptr [frame.GSSecurityCookie], eax
GetEmitter()->emitIns_R_AI(INS_mov, EA_PTR_DSP_RELOC, REG_EAX, (ssize_t)compiler->gsGlobalSecurityCookieAddr);
regSet.verifyRegUsed(REG_EAX);
GetEmitter()->emitIns_S_R(INS_mov, EA_PTRSIZE, REG_EAX, compiler->lvaGSSecurityCookie, 0);
if (initReg == REG_EAX)
{
*pInitRegZeroed = false;
}
}
}
/*****************************************************************************
*
* Generate code to check that the GS cookie wasn't thrashed by a buffer
* overrun. If pushReg is true, preserve all registers around code sequence.
* Otherwise ECX could be modified.
*
* Implementation Note: pushReg = true, in case of tail calls.
*/
void CodeGen::genEmitGSCookieCheck(bool pushReg)
{
noway_assert(compiler->gsGlobalSecurityCookieAddr || compiler->gsGlobalSecurityCookieVal);
// Make sure that EAX is reported as live GC-ref so that any GC that kicks in while
// executing GS cookie check will not collect the object pointed to by EAX.
//
// For Amd64 System V, a two-register-returned struct could be returned in RAX and RDX
// In such case make sure that the correct GC-ness of RDX is reported as well, so
// a GC object pointed by RDX will not be collected.
if (!pushReg)
{
// Handle multi-reg return type values
if (compiler->compMethodReturnsMultiRegRetType())
{
ReturnTypeDesc retTypeDesc;
if (varTypeIsLong(compiler->info.compRetNativeType))
{
retTypeDesc.InitializeLongReturnType();
}
else // we must have a struct return type
{
retTypeDesc.InitializeStructReturnType(compiler, compiler->info.compMethodInfo->args.retTypeClass,
compiler->info.compCallConv);
}
const unsigned regCount = retTypeDesc.GetReturnRegCount();
            // Only the x86 and x64 Unix ABIs allow multi-reg returns, and the
            // number of result regs should be equal to MAX_RET_REG_COUNT.
assert(regCount == MAX_RET_REG_COUNT);
for (unsigned i = 0; i < regCount; ++i)
{
gcInfo.gcMarkRegPtrVal(retTypeDesc.GetABIReturnReg(i), retTypeDesc.GetReturnRegType(i));
}
}
else if (compiler->compMethodReturnsRetBufAddr())
{
// This is for returning in an implicit RetBuf.
// If the address of the buffer is returned in REG_INTRET, mark the content of INTRET as ByRef.
// In case the return is in an implicit RetBuf, the native return type should be a struct
assert(varTypeIsStruct(compiler->info.compRetNativeType));
gcInfo.gcMarkRegPtrVal(REG_INTRET, TYP_BYREF);
}
// ... all other cases.
else
{
#ifdef TARGET_AMD64
// For x64, structs that are not returned in registers are always
// returned in implicit RetBuf. If we reached here, we should not have
// a RetBuf and the return type should not be a struct.
assert(compiler->info.compRetBuffArg == BAD_VAR_NUM);
assert(!varTypeIsStruct(compiler->info.compRetNativeType));
#endif // TARGET_AMD64
// For x86 Windows we can't make such assertions since we generate code for returning of
// the RetBuf in REG_INTRET only when the ProfilerHook is enabled. Otherwise
// compRetNativeType could be TYP_STRUCT.
gcInfo.gcMarkRegPtrVal(REG_INTRET, compiler->info.compRetNativeType);
}
}
regNumber regGSCheck;
regMaskTP regMaskGSCheck = RBM_NONE;
if (!pushReg)
{
        // Non-tail call: we can use any callee-trash register that is not
        // a return register and does not contain the 'this' pointer (which must be kept alive),
        // since we are generating the GS cookie check after a GT_RETURN block.
// Note: On Amd64 System V RDX is an arg register - REG_ARG_2 - as well
// as return register for two-register-returned structs.
if (compiler->lvaKeepAliveAndReportThis() && compiler->lvaGetDesc(compiler->info.compThisArg)->lvIsInReg() &&
(compiler->lvaGetDesc(compiler->info.compThisArg)->GetRegNum() == REG_ARG_0))
{
regGSCheck = REG_ARG_1;
}
else
{
regGSCheck = REG_ARG_0;
}
}
else
{
#ifdef TARGET_X86
// It doesn't matter which register we pick, since we're going to save and restore it
// around the check.
// TODO-CQ: Can we optimize the choice of register to avoid doing the push/pop sometimes?
regGSCheck = REG_EAX;
regMaskGSCheck = RBM_EAX;
#else // !TARGET_X86
        // Jmp calls: they specify the method handle with which the JIT queries the VM for the entry
        // point address, so such a call can be neither a VSD call nor a PInvoke calli with a cookie
        // parameter. Therefore, for jmp calls it is safe to use R11.
regGSCheck = REG_R11;
#endif // !TARGET_X86
}
regMaskTP byrefPushedRegs = RBM_NONE;
regMaskTP norefPushedRegs = RBM_NONE;
regMaskTP pushedRegs = RBM_NONE;
if (compiler->gsGlobalSecurityCookieAddr == nullptr)
{
#if defined(TARGET_AMD64)
// If GS cookie value fits within 32-bits we can use 'cmp mem64, imm32'.
// Otherwise, load the value into a reg and use 'cmp mem64, reg64'.
if ((int)compiler->gsGlobalSecurityCookieVal != (ssize_t)compiler->gsGlobalSecurityCookieVal)
{
instGen_Set_Reg_To_Imm(EA_PTRSIZE, regGSCheck, compiler->gsGlobalSecurityCookieVal);
GetEmitter()->emitIns_S_R(INS_cmp, EA_PTRSIZE, regGSCheck, compiler->lvaGSSecurityCookie, 0);
}
else
#endif // defined(TARGET_AMD64)
{
assert((int)compiler->gsGlobalSecurityCookieVal == (ssize_t)compiler->gsGlobalSecurityCookieVal);
GetEmitter()->emitIns_S_I(INS_cmp, EA_PTRSIZE, compiler->lvaGSSecurityCookie, 0,
(int)compiler->gsGlobalSecurityCookieVal);
}
}
else
{
// Ngen case - GS cookie value needs to be accessed through an indirection.
pushedRegs = genPushRegs(regMaskGSCheck, &byrefPushedRegs, &norefPushedRegs);
instGen_Set_Reg_To_Imm(EA_HANDLE_CNS_RELOC, regGSCheck, (ssize_t)compiler->gsGlobalSecurityCookieAddr);
GetEmitter()->emitIns_R_AR(ins_Load(TYP_I_IMPL), EA_PTRSIZE, regGSCheck, regGSCheck, 0);
GetEmitter()->emitIns_S_R(INS_cmp, EA_PTRSIZE, regGSCheck, compiler->lvaGSSecurityCookie, 0);
}
BasicBlock* gsCheckBlk = genCreateTempLabel();
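    // If the cookie still matches, jump over the fail-fast helper call; a mismatch falls through
    // and terminates the process.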
inst_JMP(EJ_je, gsCheckBlk);
genEmitHelperCall(CORINFO_HELP_FAIL_FAST, 0, EA_UNKNOWN);
genDefineTempLabel(gsCheckBlk);
genPopRegs(pushedRegs, byrefPushedRegs, norefPushedRegs);
}
BasicBlock* CodeGen::genCallFinally(BasicBlock* block)
{
#if defined(FEATURE_EH_FUNCLETS)
// Generate a call to the finally, like this:
// mov rcx,qword ptr [rbp + 20H] // Load rcx with PSPSym
// call finally-funclet
// jmp finally-return // Only for non-retless finally calls
// The jmp can be a NOP if we're going to the next block.
// If we're generating code for the main function (not a funclet), and there is no localloc,
// then RSP at this point is the same value as that stored in the PSPSym. So just copy RSP
// instead of loading the PSPSym in this case, or if PSPSym is not used (CoreRT ABI).
if ((compiler->lvaPSPSym == BAD_VAR_NUM) ||
(!compiler->compLocallocUsed && (compiler->funCurrentFunc()->funKind == FUNC_ROOT)))
{
#ifndef UNIX_X86_ABI
inst_Mov(TYP_I_IMPL, REG_ARG_0, REG_SPBASE, /* canSkip */ false);
#endif // !UNIX_X86_ABI
}
else
{
GetEmitter()->emitIns_R_S(ins_Load(TYP_I_IMPL), EA_PTRSIZE, REG_ARG_0, compiler->lvaPSPSym, 0);
}
GetEmitter()->emitIns_J(INS_call, block->bbJumpDest);
if (block->bbFlags & BBF_RETLESS_CALL)
{
// We have a retless call, and the last instruction generated was a call.
// If the next block is in a different EH region (or is the end of the code
// block), then we need to generate a breakpoint here (since it will never
// get executed) to get proper unwind behavior.
if ((block->bbNext == nullptr) || !BasicBlock::sameEHRegion(block, block->bbNext))
{
instGen(INS_BREAKPOINT); // This should never get executed
}
}
else
{
// TODO-Linux-x86: Do we need to handle the GC information for this NOP or JMP specially, as is done for other
// architectures?
#ifndef JIT32_GCENCODER
// Because of the way the flowgraph is connected, the liveness info for this one instruction
// after the call is not (can not be) correct in cases where a variable has a last use in the
// handler. So turn off GC reporting for this single instruction.
GetEmitter()->emitDisableGC();
#endif // JIT32_GCENCODER
// Now go to where the finally funclet needs to return to.
if (block->bbNext->bbJumpDest == block->bbNext->bbNext)
{
// Fall-through.
// TODO-XArch-CQ: Can we get rid of this instruction, and just have the call return directly
// to the next instruction? This would depend on stack walking from within the finally
// handler working without this instruction being in this special EH region.
instGen(INS_nop);
}
else
{
inst_JMP(EJ_jmp, block->bbNext->bbJumpDest);
}
#ifndef JIT32_GCENCODER
GetEmitter()->emitEnableGC();
#endif // JIT32_GCENCODER
}
#else // !FEATURE_EH_FUNCLETS
// If we are about to invoke a finally locally from a try block, we have to set the ShadowSP slot
// corresponding to the finally's nesting level. When invoked in response to an exception, the
// EE does this.
//
// We have a BBJ_CALLFINALLY followed by a BBJ_ALWAYS.
//
// We will emit :
// mov [ebp - (n + 1)], 0
// mov [ebp - n ], 0xFC
// push &step
// jmp finallyBlock
// ...
// step:
// mov [ebp - n ], 0
// jmp leaveTarget
// ...
// leaveTarget:
noway_assert(isFramePointerUsed());
// Get the nesting level which contains the finally
unsigned finallyNesting = 0;
compiler->fgGetNestingLevel(block, &finallyNesting);
// The last slot is reserved for ICodeManager::FixContext(ppEndRegion)
unsigned filterEndOffsetSlotOffs;
filterEndOffsetSlotOffs = (unsigned)(compiler->lvaLclSize(compiler->lvaShadowSPslotsVar) - TARGET_POINTER_SIZE);
unsigned curNestingSlotOffs;
curNestingSlotOffs = (unsigned)(filterEndOffsetSlotOffs - ((finallyNesting + 1) * TARGET_POINTER_SIZE));
// Zero out the slot for the next nesting level
GetEmitter()->emitIns_S_I(INS_mov, EA_PTRSIZE, compiler->lvaShadowSPslotsVar,
curNestingSlotOffs - TARGET_POINTER_SIZE, 0);
GetEmitter()->emitIns_S_I(INS_mov, EA_PTRSIZE, compiler->lvaShadowSPslotsVar, curNestingSlotOffs, LCL_FINALLY_MARK);
// Now push the address where the finally funclet should return to directly.
if (!(block->bbFlags & BBF_RETLESS_CALL))
{
assert(block->isBBCallAlwaysPair());
GetEmitter()->emitIns_J(INS_push_hide, block->bbNext->bbJumpDest);
}
else
{
// EE expects a DWORD, so we provide 0
inst_IV(INS_push_hide, 0);
}
// Jump to the finally BB
inst_JMP(EJ_jmp, block->bbJumpDest);
#endif // !FEATURE_EH_FUNCLETS
// The BBJ_ALWAYS is used because the BBJ_CALLFINALLY can't point to the
// jump target using bbJumpDest - that is already used to point
// to the finally block. So just skip past the BBJ_ALWAYS unless the
// block is RETLESS.
if (!(block->bbFlags & BBF_RETLESS_CALL))
{
assert(block->isBBCallAlwaysPair());
block = block->bbNext;
}
return block;
}
#if defined(FEATURE_EH_FUNCLETS)
void CodeGen::genEHCatchRet(BasicBlock* block)
{
// Set RAX to the address the VM should return to after the catch.
// Generate a RIP-relative
// lea reg, [rip + disp32] ; the RIP is implicit
// which will be position-independent.
GetEmitter()->emitIns_R_L(INS_lea, EA_PTR_DSP_RELOC, block->bbJumpDest, REG_INTRET);
}
#else // !FEATURE_EH_FUNCLETS
void CodeGen::genEHFinallyOrFilterRet(BasicBlock* block)
{
// The last statement of the block must be a GT_RETFILT, which has already been generated.
assert(block->lastNode() != nullptr);
assert(block->lastNode()->OperGet() == GT_RETFILT);
if (block->bbJumpKind == BBJ_EHFINALLYRET)
{
assert(block->lastNode()->AsOp()->gtOp1 == nullptr); // op1 == nullptr means endfinally
// Return using a pop-jmp sequence. As the "try" block calls
// the finally with a jmp, this leaves the x86 call-ret stack
// balanced in the normal flow of path.
noway_assert(isFramePointerRequired());
inst_RV(INS_pop_hide, REG_EAX, TYP_I_IMPL);
inst_RV(INS_i_jmp, REG_EAX, TYP_I_IMPL);
}
else
{
assert(block->bbJumpKind == BBJ_EHFILTERRET);
// The return value has already been computed.
instGen_Return(0);
}
}
#endif // !FEATURE_EH_FUNCLETS
// Move an immediate value into an integer register
void CodeGen::instGen_Set_Reg_To_Imm(emitAttr size,
regNumber reg,
ssize_t imm,
insFlags flags DEBUGARG(size_t targetHandle) DEBUGARG(GenTreeFlags gtFlags))
{
// reg cannot be a FP register
assert(!genIsValidFloatReg(reg));
emitAttr origAttr = size;
if (!compiler->opts.compReloc)
{
// Strip any reloc flags from size if we aren't doing relocs
size = EA_REMOVE_FLG(size, EA_CNS_RELOC_FLG | EA_DSP_RELOC_FLG);
}
if ((imm == 0) && !EA_IS_RELOC(size))
{
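        // A zero immediate is materialized with "xor reg, reg", which is smaller than "mov reg, 0".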
instGen_Set_Reg_To_Zero(size, reg, flags);
}
else
{
// Only use lea if the original was relocatable. Otherwise we can get spurious
// instruction selection due to different memory placement at runtime.
if (EA_IS_RELOC(origAttr) && genDataIndirAddrCanBeEncodedAsPCRelOffset(imm))
{
// We will use lea so displacement and not immediate will be relocatable
size = EA_SET_FLG(EA_REMOVE_FLG(size, EA_CNS_RELOC_FLG), EA_DSP_RELOC_FLG);
GetEmitter()->emitIns_R_AI(INS_lea, size, reg, imm);
}
else
{
GetEmitter()->emitIns_R_I(INS_mov, size, reg, imm DEBUGARG(gtFlags));
}
}
regSet.verifyRegUsed(reg);
}
/***********************************************************************************
*
* Generate code to set a register 'targetReg' of type 'targetType' to the constant
* specified by the constant (GT_CNS_INT or GT_CNS_DBL) in 'tree'. This does not call
* genProduceReg() on the target register.
*/
void CodeGen::genSetRegToConst(regNumber targetReg, var_types targetType, GenTree* tree)
{
switch (tree->gtOper)
{
case GT_CNS_INT:
{
// relocatable values tend to come down as a CNS_INT of native int type
// so the line between these two opcodes is kind of blurry
GenTreeIntConCommon* con = tree->AsIntConCommon();
ssize_t cnsVal = con->IconValue();
emitAttr attr = emitActualTypeSize(targetType);
// Currently this cannot be done for all handles due to
// https://github.com/dotnet/runtime/issues/60712. However, it is
// also unclear whether we unconditionally want to use rip-relative
// lea instructions when not necessary. While a mov is larger, on
// many Intel CPUs rip-relative lea instructions have higher
// latency.
if (con->ImmedValNeedsReloc(compiler))
{
attr = EA_SET_FLG(attr, EA_CNS_RELOC_FLG);
}
if (targetType == TYP_BYREF)
{
attr = EA_SET_FLG(attr, EA_BYREF_FLG);
}
instGen_Set_Reg_To_Imm(attr, targetReg, cnsVal, INS_FLAGS_DONT_CARE DEBUGARG(0) DEBUGARG(tree->gtFlags));
regSet.verifyRegUsed(targetReg);
}
break;
case GT_CNS_DBL:
{
emitter* emit = GetEmitter();
emitAttr size = emitTypeSize(targetType);
double constValue = tree->AsDblCon()->gtDconVal;
// Make sure we use "xorps reg, reg" only for +ve zero constant (0.0) and not for -ve zero (-0.0)
if (*(__int64*)&constValue == 0)
{
// A faster/smaller way to generate 0
emit->emitIns_R_R(INS_xorps, size, targetReg, targetReg);
}
else
{
CORINFO_FIELD_HANDLE hnd = emit->emitFltOrDblConst(constValue, size);
emit->emitIns_R_C(ins_Load(targetType), size, targetReg, hnd, 0);
}
}
break;
default:
unreached();
}
}
//------------------------------------------------------------------------
// genCodeForNegNot: Produce code for a GT_NEG/GT_NOT node.
//
// Arguments:
// tree - the node
//
void CodeGen::genCodeForNegNot(GenTree* tree)
{
assert(tree->OperIs(GT_NEG, GT_NOT));
regNumber targetReg = tree->GetRegNum();
var_types targetType = tree->TypeGet();
if (varTypeIsFloating(targetType))
{
assert(tree->gtOper == GT_NEG);
genSSE2BitwiseOp(tree);
}
else
{
GenTree* operand = tree->gtGetOp1();
assert(operand->isUsedFromReg());
regNumber operandReg = genConsumeReg(operand);
inst_Mov(targetType, targetReg, operandReg, /* canSkip */ true);
instruction ins = genGetInsForOper(tree->OperGet(), targetType);
inst_RV(ins, targetReg, targetType);
}
genProduceReg(tree);
}
//------------------------------------------------------------------------
// genCodeForBswap: Produce code for a GT_BSWAP / GT_BSWAP16 node.
//
// Arguments:
// tree - the node
//
void CodeGen::genCodeForBswap(GenTree* tree)
{
// TODO: If we're swapping immediately after a read from memory or immediately before
// a write to memory, use the MOVBE instruction instead of the BSWAP instruction if
// the platform supports it.
assert(tree->OperIs(GT_BSWAP, GT_BSWAP16));
regNumber targetReg = tree->GetRegNum();
var_types targetType = tree->TypeGet();
GenTree* operand = tree->gtGetOp1();
assert(operand->isUsedFromReg());
regNumber operandReg = genConsumeReg(operand);
inst_Mov(targetType, targetReg, operandReg, /* canSkip */ true);
if (tree->OperIs(GT_BSWAP))
{
// 32-bit and 64-bit byte swaps use "bswap reg"
inst_RV(INS_bswap, targetReg, targetType);
}
else
{
// 16-bit byte swaps use "ror reg.16, 8"
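// For example, 0x1234 rotated right by 8 bits becomes 0x3412, swapping the two bytes.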
inst_RV_IV(INS_ror_N, targetReg, 8 /* val */, emitAttr::EA_2BYTE);
}
genProduceReg(tree);
}
//------------------------------------------------------------------------
// genCodeForIncSaturate: Produce code for a GT_INC_SATURATE node.
//
// Arguments:
//    tree - the node
//
void CodeGen::genCodeForIncSaturate(GenTree* tree)
{
regNumber targetReg = tree->GetRegNum();
var_types targetType = tree->TypeGet();
GenTree* operand = tree->gtGetOp1();
assert(operand->isUsedFromReg());
regNumber operandReg = genConsumeReg(operand);
inst_Mov(targetType, targetReg, operandReg, /* canSkip */ true);
inst_RV_IV(INS_add, targetReg, 1, emitActualTypeSize(targetType));
inst_RV_IV(INS_sbb, targetReg, 0, emitActualTypeSize(targetType));
genProduceReg(tree);
}
//------------------------------------------------------------------------
// genCodeForMulHi: Generate code to get the high N bits of an N*N=2N bit multiplication result.
//
// Arguments:
//    treeNode - the GT_MULHI (or, on x86, GT_MUL_LONG) node
//
void CodeGen::genCodeForMulHi(GenTreeOp* treeNode)
{
assert(!treeNode->gtOverflowEx());
regNumber targetReg = treeNode->GetRegNum();
var_types targetType = treeNode->TypeGet();
emitter* emit = GetEmitter();
emitAttr size = emitTypeSize(treeNode);
GenTree* op1 = treeNode->AsOp()->gtOp1;
GenTree* op2 = treeNode->AsOp()->gtOp2;
// to get the high bits of the multiply, we are constrained to using the
// 1-op form: RDX:RAX = RAX * rm
// The 3-op form (Rx=Ry*Rz) does not support it.
genConsumeOperands(treeNode->AsOp());
GenTree* regOp = op1;
GenTree* rmOp = op2;
// Set rmOp to the memory operand (if any)
if (op1->isUsedFromMemory() || (op2->isUsedFromReg() && (op2->GetRegNum() == REG_RAX)))
{
regOp = op2;
rmOp = op1;
}
assert(regOp->isUsedFromReg());
// Setup targetReg when neither of the source operands was a matching register
inst_Mov(targetType, REG_RAX, regOp->GetRegNum(), /* canSkip */ true);
instruction ins;
if ((treeNode->gtFlags & GTF_UNSIGNED) == 0)
{
ins = INS_imulEAX;
}
else
{
ins = INS_mulEAX;
}
emit->emitInsBinary(ins, size, treeNode, rmOp);
// Move the result to the desired register, if necessary
if (treeNode->OperGet() == GT_MULHI)
{
inst_Mov(targetType, targetReg, REG_RDX, /* canSkip */ true);
}
genProduceReg(treeNode);
}
#ifdef TARGET_X86
//------------------------------------------------------------------------
// genCodeForLongUMod: Generate code for a tree of the form
// `(umod (gt_long x y) (const int))`
//
// Arguments:
// node - the node for which to generate code
//
void CodeGen::genCodeForLongUMod(GenTreeOp* node)
{
assert(node != nullptr);
assert(node->OperGet() == GT_UMOD);
assert(node->TypeGet() == TYP_INT);
GenTreeOp* const dividend = node->gtOp1->AsOp();
assert(dividend->OperGet() == GT_LONG);
assert(varTypeIsLong(dividend));
genConsumeOperands(node);
GenTree* const dividendLo = dividend->gtOp1;
GenTree* const dividendHi = dividend->gtOp2;
assert(dividendLo->isUsedFromReg());
assert(dividendHi->isUsedFromReg());
GenTree* const divisor = node->gtOp2;
assert(divisor->gtSkipReloadOrCopy()->OperGet() == GT_CNS_INT);
assert(divisor->gtSkipReloadOrCopy()->isUsedFromReg());
assert(divisor->gtSkipReloadOrCopy()->AsIntCon()->gtIconVal >= 2);
assert(divisor->gtSkipReloadOrCopy()->AsIntCon()->gtIconVal <= 0x3fffffff);
// dividendLo must be in RAX; dividendHi must be in RDX
genCopyRegIfNeeded(dividendLo, REG_EAX);
genCopyRegIfNeeded(dividendHi, REG_EDX);
// At this point, EDX:EAX contains the 64-bit dividend and op2->GetRegNum()
// contains the 32-bit divisor. We want to generate the following code:
//
// cmp edx, divisor->GetRegNum()
// jb noOverflow
//
// mov temp, eax
// mov eax, edx
// xor edx, edx
// div divisor->GetRegNum()
// mov eax, temp
//
// noOverflow:
// div divisor->GetRegNum()
//
// This works because (a * 2^32 + b) % c = ((a % c) * 2^32 + b) % c.
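// For example, with a divisor c = 7 and high half a = 9:
//   (9 * 2^32 + b) % 7 == ((9 % 7) * 2^32 + b) % 7 == (2 * 2^32 + b) % 7
// so reducing the high half first ensures EDX < divisor and the second DIV cannot fault.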
BasicBlock* const noOverflow = genCreateTempLabel();
// cmp edx, divisor->GetRegNum()
// jb noOverflow
inst_RV_RV(INS_cmp, REG_EDX, divisor->GetRegNum());
inst_JMP(EJ_jb, noOverflow);
// mov temp, eax
// mov eax, edx
// xor edx, edx
// div divisor->GetRegNum()
// mov eax, temp
const regNumber tempReg = node->GetSingleTempReg();
inst_Mov(TYP_INT, tempReg, REG_EAX, /* canSkip */ false);
inst_Mov(TYP_INT, REG_EAX, REG_EDX, /* canSkip */ false);
instGen_Set_Reg_To_Zero(EA_PTRSIZE, REG_EDX);
inst_RV(INS_div, divisor->GetRegNum(), TYP_INT);
inst_Mov(TYP_INT, REG_EAX, tempReg, /* canSkip */ false);
// noOverflow:
// div divisor->GetRegNum()
genDefineTempLabel(noOverflow);
inst_RV(INS_div, divisor->GetRegNum(), TYP_INT);
const regNumber targetReg = node->GetRegNum();
inst_Mov(TYP_INT, targetReg, REG_RDX, /* canSkip */ true);
genProduceReg(node);
}
#endif // TARGET_X86
//------------------------------------------------------------------------
// genCodeForDivMod: Generate code for a DIV or MOD operation.
//
// Arguments:
// treeNode - the node to generate the code for
//
void CodeGen::genCodeForDivMod(GenTreeOp* treeNode)
{
assert(treeNode->OperIs(GT_DIV, GT_UDIV, GT_MOD, GT_UMOD));
GenTree* dividend = treeNode->gtOp1;
#ifdef TARGET_X86
if (varTypeIsLong(dividend->TypeGet()))
{
genCodeForLongUMod(treeNode);
return;
}
#endif // TARGET_X86
GenTree* divisor = treeNode->gtOp2;
genTreeOps oper = treeNode->OperGet();
emitAttr size = emitTypeSize(treeNode);
regNumber targetReg = treeNode->GetRegNum();
var_types targetType = treeNode->TypeGet();
emitter* emit = GetEmitter();
// Node's type must be int/native int, small integer types are not
// supported and floating point types are handled by genCodeForBinary.
assert(varTypeIsIntOrI(targetType));
// dividend is in a register.
assert(dividend->isUsedFromReg());
genConsumeOperands(treeNode->AsOp());
// dividend must be in RAX
genCopyRegIfNeeded(dividend, REG_RAX);
// zero or sign extend rax to rdx
if (oper == GT_UMOD || oper == GT_UDIV ||
(dividend->IsIntegralConst() && (dividend->AsIntConCommon()->IconValue() > 0)))
{
instGen_Set_Reg_To_Zero(EA_PTRSIZE, REG_EDX);
}
else
{
emit->emitIns(INS_cdq, size);
// the cdq instruction writes RDX, so clear the gcInfo for RDX
gcInfo.gcMarkRegSetNpt(RBM_RDX);
}
// Perform the 'targetType' (64-bit or 32-bit) divide instruction
instruction ins;
if (oper == GT_UMOD || oper == GT_UDIV)
{
ins = INS_div;
}
else
{
ins = INS_idiv;
}
emit->emitInsBinary(ins, size, treeNode, divisor);
// DIV/IDIV instructions always store the quotient in RAX and the remainder in RDX.
// Move the result to the desired register, if necessary
if (oper == GT_DIV || oper == GT_UDIV)
{
inst_Mov(targetType, targetReg, REG_RAX, /* canSkip */ true);
}
else
{
assert((oper == GT_MOD) || (oper == GT_UMOD));
inst_Mov(targetType, targetReg, REG_RDX, /* canSkip */ true);
}
genProduceReg(treeNode);
}
//------------------------------------------------------------------------
// genCodeForBinary: Generate code for many binary arithmetic operators
//
// Arguments:
// treeNode - The binary operation for which we are generating code.
//
// Return Value:
// None.
//
// Notes:
// Integer MUL and DIV variants have special constraints on x64 so are not handled here.
// See the assert below for the operators that are handled.
void CodeGen::genCodeForBinary(GenTreeOp* treeNode)
{
#ifdef DEBUG
bool isValidOper = treeNode->OperIs(GT_ADD, GT_SUB);
if (varTypeIsFloating(treeNode->TypeGet()))
{
isValidOper |= treeNode->OperIs(GT_MUL, GT_DIV);
}
else
{
isValidOper |= treeNode->OperIs(GT_AND, GT_OR, GT_XOR);
#ifndef TARGET_64BIT
isValidOper |= treeNode->OperIs(GT_ADD_LO, GT_ADD_HI, GT_SUB_LO, GT_SUB_HI);
#endif
}
assert(isValidOper);
#endif
genConsumeOperands(treeNode);
const genTreeOps oper = treeNode->OperGet();
regNumber targetReg = treeNode->GetRegNum();
var_types targetType = treeNode->TypeGet();
emitter* emit = GetEmitter();
GenTree* op1 = treeNode->gtGetOp1();
GenTree* op2 = treeNode->gtGetOp2();
// Commutative operations can mark op1 as contained or reg-optional to generate "op reg, memop/immed"
if (!op1->isUsedFromReg())
{
assert(treeNode->OperIsCommutative());
assert(op1->isMemoryOp() || op1->IsLocal() || op1->IsCnsNonZeroFltOrDbl() || op1->IsIntCnsFitsInI32() ||
op1->IsRegOptional());
op1 = treeNode->gtGetOp2();
op2 = treeNode->gtGetOp1();
}
instruction ins = genGetInsForOper(treeNode->OperGet(), targetType);
// The arithmetic node must be sitting in a register (since it's not contained)
noway_assert(targetReg != REG_NA);
regNumber op1reg = op1->isUsedFromReg() ? op1->GetRegNum() : REG_NA;
regNumber op2reg = op2->isUsedFromReg() ? op2->GetRegNum() : REG_NA;
if (varTypeIsFloating(treeNode->TypeGet()))
{
// floating-point addition, subtraction, multiplication, and division
// all have RMW semantics if VEX support is not available
bool isRMW = !compiler->canUseVexEncoding();
inst_RV_RV_TT(ins, emitTypeSize(treeNode), targetReg, op1reg, op2, isRMW);
genProduceReg(treeNode);
return;
}
GenTree* dst;
GenTree* src;
// This is the case of reg1 = reg1 op reg2
// We're ready to emit the instruction without any moves
if (op1reg == targetReg)
{
dst = op1;
src = op2;
}
// We have reg1 = reg2 op reg1
// In order for this operation to be correct,
// the operation must be commutative so that
// we can convert it into reg1 = reg1 op reg2 and emit
// the same code as above.
else if (op2reg == targetReg)
{
noway_assert(GenTree::OperIsCommutative(oper));
dst = op2;
src = op1;
}
// now we know there are 3 different operands so attempt to use LEA
else if (oper == GT_ADD && !varTypeIsFloating(treeNode) && !treeNode->gtOverflowEx() // LEA does not set flags
&& (op2->isContainedIntOrIImmed() || op2->isUsedFromReg()) && !treeNode->gtSetFlags())
{
if (op2->isContainedIntOrIImmed())
{
emit->emitIns_R_AR(INS_lea, emitTypeSize(treeNode), targetReg, op1reg,
(int)op2->AsIntConCommon()->IconValue());
}
else
{
assert(op2reg != REG_NA);
emit->emitIns_R_ARX(INS_lea, emitTypeSize(treeNode), targetReg, op1reg, op2reg, 1, 0);
}
genProduceReg(treeNode);
return;
}
// dest, op1 and op2 registers are different:
// reg3 = reg1 op reg2
// We can implement this by issuing a mov:
// reg3 = reg1
// reg3 = reg3 op reg2
else
{
var_types op1Type = op1->TypeGet();
inst_Mov(op1Type, targetReg, op1reg, /* canSkip */ false);
regSet.verifyRegUsed(targetReg);
gcInfo.gcMarkRegPtrVal(targetReg, op1Type);
dst = treeNode;
src = op2;
}
// try to use an inc or dec
if (oper == GT_ADD && !varTypeIsFloating(treeNode) && src->isContainedIntOrIImmed() && !treeNode->gtOverflowEx())
{
if (src->IsIntegralConst(1))
{
emit->emitIns_R(INS_inc, emitTypeSize(treeNode), targetReg);
genProduceReg(treeNode);
return;
}
else if (src->IsIntegralConst(-1))
{
emit->emitIns_R(INS_dec, emitTypeSize(treeNode), targetReg);
genProduceReg(treeNode);
return;
}
}
regNumber r = emit->emitInsBinary(ins, emitTypeSize(treeNode), dst, src);
noway_assert(r == targetReg);
if (treeNode->gtOverflowEx())
{
#if !defined(TARGET_64BIT)
assert(oper == GT_ADD || oper == GT_SUB || oper == GT_ADD_HI || oper == GT_SUB_HI);
#else
assert(oper == GT_ADD || oper == GT_SUB);
#endif
genCheckOverflow(treeNode);
}
genProduceReg(treeNode);
}
//------------------------------------------------------------------------
// genCodeForMul: Generate code for a MUL operation.
//
// Arguments:
// treeNode - the node to generate the code for
//
void CodeGen::genCodeForMul(GenTreeOp* treeNode)
{
assert(treeNode->OperIs(GT_MUL));
regNumber targetReg = treeNode->GetRegNum();
var_types targetType = treeNode->TypeGet();
emitter* emit = GetEmitter();
// Node's type must be int or long (only on x64), small integer types are not
// supported and floating point types are handled by genCodeForBinary.
assert(varTypeIsIntOrI(targetType));
instruction ins;
emitAttr size = emitTypeSize(treeNode);
bool isUnsignedMultiply = ((treeNode->gtFlags & GTF_UNSIGNED) != 0);
bool requiresOverflowCheck = treeNode->gtOverflowEx();
GenTree* op1 = treeNode->gtGetOp1();
GenTree* op2 = treeNode->gtGetOp2();
// there are 3 forms of x64 multiply:
// 1-op form with 128 result: RDX:RAX = RAX * rm
// 2-op form: reg *= rm
// 3-op form: reg = rm * imm
genConsumeOperands(treeNode);
// This matches the 'mul' lowering in Lowering::SetMulOpCounts()
//
// immOp :: Only one operand can be an immediate
// rmOp :: Only one operand can be a memory op.
// regOp :: A register op (especially the operand that matches 'targetReg')
// (can be nullptr when we have both a memory op and an immediate op)
GenTree* immOp = nullptr;
GenTree* rmOp = op1;
GenTree* regOp;
if (op2->isContainedIntOrIImmed())
{
immOp = op2;
}
else if (op1->isContainedIntOrIImmed())
{
immOp = op1;
rmOp = op2;
}
if (immOp != nullptr)
{
// CQ: When possible use LEA for mul by imm 3, 5 or 9
ssize_t imm = immOp->AsIntConCommon()->IconValue();
if (!requiresOverflowCheck && rmOp->isUsedFromReg() && ((imm == 3) || (imm == 5) || (imm == 9)))
{
// We will use the LEA instruction to perform this multiply
// Note that an LEA with base=x, index=x and scale=(imm-1) computes x*imm when imm=3,5 or 9.
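// For example, "lea reg, [x + x*2]" computes x * 3 in a single instruction.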
unsigned int scale = (unsigned int)(imm - 1);
GetEmitter()->emitIns_R_ARX(INS_lea, size, targetReg, rmOp->GetRegNum(), rmOp->GetRegNum(), scale, 0);
}
else if (!requiresOverflowCheck && rmOp->isUsedFromReg() && (imm == genFindLowestBit(imm)) && (imm != 0))
{
// Use shift for constant multiply when legal
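// For example, a multiply by 8 is emitted as "shl reg, 3".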
uint64_t zextImm = static_cast<uint64_t>(static_cast<size_t>(imm));
unsigned int shiftAmount = genLog2(zextImm);
// Copy reg src to dest register
inst_Mov(targetType, targetReg, rmOp->GetRegNum(), /* canSkip */ true);
inst_RV_SH(INS_shl, size, targetReg, shiftAmount);
}
else
{
// use the 3-op form with immediate
ins = GetEmitter()->inst3opImulForReg(targetReg);
emit->emitInsBinary(ins, size, rmOp, immOp);
}
}
else // we have no contained immediate operand
{
regOp = op1;
rmOp = op2;
regNumber mulTargetReg = targetReg;
if (isUnsignedMultiply && requiresOverflowCheck)
{
ins = INS_mulEAX;
mulTargetReg = REG_RAX;
}
else
{
ins = INS_imul;
}
// Set rmOp to the memory operand (if any)
// or set regOp to the op2 when it has the matching target register for our multiply op
//
if (op1->isUsedFromMemory() || (op2->isUsedFromReg() && (op2->GetRegNum() == mulTargetReg)))
{
regOp = op2;
rmOp = op1;
}
assert(regOp->isUsedFromReg());
// Setup targetReg when neither of the source operands was a matching register
inst_Mov(targetType, mulTargetReg, regOp->GetRegNum(), /* canSkip */ true);
emit->emitInsBinary(ins, size, treeNode, rmOp);
// Move the result to the desired register, if necessary
if (ins == INS_mulEAX)
{
inst_Mov(targetType, targetReg, REG_RAX, /* canSkip */ true);
}
}
if (requiresOverflowCheck)
{
// Overflow checking is only used for non-floating point types
noway_assert(!varTypeIsFloating(treeNode));
genCheckOverflow(treeNode);
}
genProduceReg(treeNode);
}
#ifdef FEATURE_SIMD
//------------------------------------------------------------------------
// genSIMDSplitReturn: Generates code for returning a fixed-size SIMD type that lives
// in a single register, but is returned in multiple registers.
//
// Arguments:
// src - The source of the return
// retTypeDesc - The return type descriptor.
//
void CodeGen::genSIMDSplitReturn(GenTree* src, ReturnTypeDesc* retTypeDesc)
{
assert(varTypeIsSIMD(src));
assert(src->isUsedFromReg());
// This is the case where the operand is in a single reg and needs to be
// returned in multiple ABI return registers.
regNumber opReg = src->GetRegNum();
regNumber reg0 = retTypeDesc->GetABIReturnReg(0);
regNumber reg1 = retTypeDesc->GetABIReturnReg(1);
assert((reg0 != REG_NA) && (reg1 != REG_NA) && (opReg != REG_NA));
const bool srcIsFloatReg = genIsValidFloatReg(opReg);
const bool dstIsFloatReg = genIsValidFloatReg(reg0);
assert(srcIsFloatReg);
#ifdef TARGET_AMD64
assert(src->TypeIs(TYP_SIMD16));
assert(srcIsFloatReg == dstIsFloatReg);
if (opReg != reg0 && opReg != reg1)
{
// Operand reg is different from return regs.
// Copy opReg to reg0 and let it to be handled by one of the
// two cases below.
inst_Mov(TYP_SIMD16, reg0, opReg, /* canSkip */ false);
opReg = reg0;
}
if (opReg == reg0)
{
assert(opReg != reg1);
// reg1 = opReg.
inst_Mov(TYP_SIMD16, reg1, opReg, /* canSkip */ false);
}
else
{
assert(opReg == reg1);
// reg0 = opReg.
inst_Mov(TYP_SIMD16, reg0, opReg, /* canSkip */ false);
}
// reg0 - already has required 8-byte in bit position [63:0].
// swap upper and lower 8-bytes of reg1 so that desired 8-byte is in bit position [63:0].
inst_RV_RV_IV(INS_shufpd, EA_16BYTE, reg1, reg1, 0x01);
#else // TARGET_X86
assert(src->TypeIs(TYP_SIMD8));
assert(srcIsFloatReg != dstIsFloatReg);
assert((reg0 == REG_EAX) && (reg1 == REG_EDX));
// reg0 = opReg[31:0]
inst_Mov(TYP_INT, reg0, opReg, /* canSkip */ false);
// reg1 = opReg[63:32]
if (compiler->compOpportunisticallyDependsOn(InstructionSet_SSE41))
{
inst_RV_TT_IV(INS_pextrd, EA_4BYTE, reg1, src, 1);
}
else
{
int8_t shuffleMask = 1; // we only need [63:32]->[31:0], the rest is not read.
inst_RV_TT_IV(INS_pshufd, EA_8BYTE, opReg, src, shuffleMask);
inst_Mov(TYP_INT, reg1, opReg, /* canSkip */ false);
}
#endif // TARGET_X86
}
#endif // FEATURE_SIMD
#if defined(TARGET_X86)
//------------------------------------------------------------------------
// genFloatReturn: Generates code for float return statement for x86.
//
// Note: treeNode's and op1's registers are already consumed.
//
// Arguments:
// treeNode - The GT_RETURN or GT_RETFILT tree node with float type.
//
// Return Value:
// None
//
void CodeGen::genFloatReturn(GenTree* treeNode)
{
assert(treeNode->OperGet() == GT_RETURN || treeNode->OperGet() == GT_RETFILT);
assert(varTypeIsFloating(treeNode));
GenTree* op1 = treeNode->gtGetOp1();
// Spill the return value register from an XMM register to the stack, then load it on the x87 stack.
// If it already has a home location, use that. Otherwise, we need a temp.
if (genIsRegCandidateLocal(op1) && compiler->lvaGetDesc(op1->AsLclVarCommon())->lvOnFrame)
{
if (compiler->lvaGetDesc(op1->AsLclVarCommon())->GetRegNum() != REG_STK)
{
op1->gtFlags |= GTF_SPILL;
inst_TT_RV(ins_Store(op1->gtType, compiler->isSIMDTypeLocalAligned(op1->AsLclVarCommon()->GetLclNum())),
emitTypeSize(op1->TypeGet()), op1, op1->GetRegNum());
}
// Now, load it to the fp stack.
GetEmitter()->emitIns_S(INS_fld, emitTypeSize(op1), op1->AsLclVarCommon()->GetLclNum(), 0);
}
else
{
// Spill the value, which should be in a register, then load it to the fp stack.
// TODO-X86-CQ: Deal with things that are already in memory (don't call genConsumeReg yet).
op1->gtFlags |= GTF_SPILL;
regSet.rsSpillTree(op1->GetRegNum(), op1);
op1->gtFlags |= GTF_SPILLED;
op1->gtFlags &= ~GTF_SPILL;
TempDsc* t = regSet.rsUnspillInPlace(op1, op1->GetRegNum());
inst_FS_ST(INS_fld, emitActualTypeSize(op1->gtType), t, 0);
op1->gtFlags &= ~GTF_SPILLED;
regSet.tmpRlsTemp(t);
}
}
#endif // TARGET_X86
//------------------------------------------------------------------------
// genCodeForCompare: Produce code for a GT_EQ/GT_NE/GT_LT/GT_LE/GT_GE/GT_GT/GT_TEST_EQ/GT_TEST_NE/GT_CMP node.
//
// Arguments:
// tree - the node
//
void CodeGen::genCodeForCompare(GenTreeOp* tree)
{
assert(tree->OperIs(GT_EQ, GT_NE, GT_LT, GT_LE, GT_GE, GT_GT, GT_TEST_EQ, GT_TEST_NE, GT_CMP));
// TODO-XArch-CQ: Check if we can use the currently set flags.
// TODO-XArch-CQ: Check for the case where we can simply transfer the carry bit to a register
// (signed < or >= where targetReg != REG_NA)
GenTree* op1 = tree->gtOp1;
var_types op1Type = op1->TypeGet();
if (varTypeIsFloating(op1Type))
{
genCompareFloat(tree);
}
else
{
genCompareInt(tree);
}
}
//------------------------------------------------------------------------
// genCodeForBT: Generates code for a GT_BT node.
//
// Arguments:
// tree - The node.
//
void CodeGen::genCodeForBT(GenTreeOp* bt)
{
assert(bt->OperIs(GT_BT));
GenTree* op1 = bt->gtGetOp1();
GenTree* op2 = bt->gtGetOp2();
var_types type = genActualType(op1->TypeGet());
assert(op1->isUsedFromReg() && op2->isUsedFromReg());
assert((genTypeSize(type) >= genTypeSize(TYP_INT)) && (genTypeSize(type) <= genTypeSize(TYP_I_IMPL)));
genConsumeOperands(bt);
// Note that the emitter doesn't fully support INS_bt, it only supports the reg,reg
// form and encodes the registers in reverse order. To get the correct order we need
// to reverse the operands when calling emitIns_R_R.
GetEmitter()->emitIns_R_R(INS_bt, emitTypeSize(type), op2->GetRegNum(), op1->GetRegNum());
}
// clang-format off
const CodeGen::GenConditionDesc CodeGen::GenConditionDesc::map[32]
{
{ }, // NONE
{ }, // 1
{ EJ_jl }, // SLT
{ EJ_jle }, // SLE
{ EJ_jge }, // SGE
{ EJ_jg }, // SGT
{ EJ_js }, // S
{ EJ_jns }, // NS
{ EJ_je }, // EQ
{ EJ_jne }, // NE
{ EJ_jb }, // ULT
{ EJ_jbe }, // ULE
{ EJ_jae }, // UGE
{ EJ_ja }, // UGT
{ EJ_jb }, // C
{ EJ_jae }, // NC
// Floating point compare instructions (UCOMISS, UCOMISD etc.) set the condition flags as follows:
// ZF PF CF Meaning
// ---------------------
// 1 1 1 Unordered
// 0 0 0 Greater
// 0 0 1 Less Than
// 1 0 0 Equal
//
// Since ZF and CF are also set when the result is unordered, in some cases we first need to check
// PF before checking ZF/CF. In general, ordered conditions will result in a jump only if PF is not
// set and unordered conditions will result in a jump only if PF is set.
{ EJ_jnp, GT_AND, EJ_je }, // FEQ
{ EJ_jne }, // FNE
{ EJ_jnp, GT_AND, EJ_jb }, // FLT
{ EJ_jnp, GT_AND, EJ_jbe }, // FLE
{ EJ_jae }, // FGE
{ EJ_ja }, // FGT
{ EJ_jo }, // O
{ EJ_jno }, // NO
{ EJ_je }, // FEQU
{ EJ_jp, GT_OR, EJ_jne }, // FNEU
{ EJ_jb }, // FLTU
{ EJ_jbe }, // FLEU
{ EJ_jp, GT_OR, EJ_jae }, // FGEU
{ EJ_jp, GT_OR, EJ_ja }, // FGTU
{ EJ_jp }, // P
{ EJ_jnp }, // NP
};
// clang-format on
//------------------------------------------------------------------------
// inst_SETCC: Generate code to set a register to 0 or 1 based on a condition.
//
// Arguments:
// condition - The condition
// type - The type of the value to be produced
// dstReg - The destination register to be set to 1 or 0
//
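// Notes:
//    For a compound floating-point condition such as FEQ (EJ_jnp AND EJ_je), this emits a
//    sequence along the lines of:
//       setnp dst
//       jp    L_next      ; unordered => leave dst = 0
//       sete  dst
//    L_next:
//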
void CodeGen::inst_SETCC(GenCondition condition, var_types type, regNumber dstReg)
{
assert(varTypeIsIntegral(type));
assert(genIsValidIntReg(dstReg) && isByteReg(dstReg));
const GenConditionDesc& desc = GenConditionDesc::Get(condition);
inst_SET(desc.jumpKind1, dstReg);
if (desc.oper != GT_NONE)
{
BasicBlock* labelNext = genCreateTempLabel();
inst_JMP((desc.oper == GT_OR) ? desc.jumpKind1 : emitter::emitReverseJumpKind(desc.jumpKind1), labelNext);
inst_SET(desc.jumpKind2, dstReg);
genDefineTempLabel(labelNext);
}
if (!varTypeIsByte(type))
{
GetEmitter()->emitIns_Mov(INS_movzx, EA_1BYTE, dstReg, dstReg, /* canSkip */ false);
}
}
//------------------------------------------------------------------------
// genCodeForReturnTrap: Produce code for a GT_RETURNTRAP node.
//
// Arguments:
// tree - the GT_RETURNTRAP node
//
void CodeGen::genCodeForReturnTrap(GenTreeOp* tree)
{
assert(tree->OperGet() == GT_RETURNTRAP);
// this is nothing but a conditional call to CORINFO_HELP_STOP_FOR_GC
// based on the contents of 'data'
GenTree* data = tree->gtOp1;
genConsumeRegs(data);
GenTreeIntCon cns = intForm(TYP_INT, 0);
cns.SetContained();
GetEmitter()->emitInsBinary(INS_cmp, emitTypeSize(TYP_INT), data, &cns);
BasicBlock* skipLabel = genCreateTempLabel();
inst_JMP(EJ_je, skipLabel);
// emit the call to the EE-helper that stops for GC (or other reasons)
regNumber tmpReg = tree->GetSingleTempReg(RBM_ALLINT);
assert(genIsValidIntReg(tmpReg));
genEmitHelperCall(CORINFO_HELP_STOP_FOR_GC, 0, EA_UNKNOWN, tmpReg);
genDefineTempLabel(skipLabel);
}
/*****************************************************************************
*
* Generate code for a single node in the tree.
* Preconditions: All operands have been evaluated
*
*/
void CodeGen::genCodeForTreeNode(GenTree* treeNode)
{
regNumber targetReg;
#if !defined(TARGET_64BIT)
if (treeNode->TypeGet() == TYP_LONG)
{
// All long enregistered nodes will have been decomposed into their
// constituent lo and hi nodes.
targetReg = REG_NA;
}
else
#endif // !defined(TARGET_64BIT)
{
targetReg = treeNode->GetRegNum();
}
var_types targetType = treeNode->TypeGet();
emitter* emit = GetEmitter();
#ifdef DEBUG
// Validate that all the operands for the current node are consumed in order.
// This is important because LSRA ensures that any necessary copies will be
// handled correctly.
lastConsumedNode = nullptr;
if (compiler->verbose)
{
unsigned seqNum = treeNode->gtSeqNum; // Useful for setting a conditional break in Visual Studio
compiler->gtDispLIRNode(treeNode, "Generating: ");
}
#endif // DEBUG
// Is this a node whose value is already in a register? LSRA denotes this by
// setting the GTF_REUSE_REG_VAL flag.
if (treeNode->IsReuseRegVal())
{
// For now, this is only used for constant nodes.
assert((treeNode->OperIsConst()));
JITDUMP(" TreeNode is marked ReuseReg\n");
return;
}
// contained nodes are part of their parents for codegen purposes
// ex : immediates, most LEAs
if (treeNode->isContained())
{
return;
}
switch (treeNode->gtOper)
{
#ifndef JIT32_GCENCODER
case GT_START_NONGC:
GetEmitter()->emitDisableGC();
break;
#endif // !defined(JIT32_GCENCODER)
case GT_START_PREEMPTGC:
// Kill callee saves GC registers, and create a label
// so that information gets propagated to the emitter.
gcInfo.gcMarkRegSetNpt(RBM_INT_CALLEE_SAVED);
genDefineTempLabel(genCreateTempLabel());
break;
case GT_PROF_HOOK:
#ifdef PROFILING_SUPPORTED
// We should be seeing this only if profiler hook is needed
noway_assert(compiler->compIsProfilerHookNeeded());
// Right now this node is used only for tail calls. In future if
// we intend to use it for Enter or Leave hooks, add a data member
// to this node indicating the kind of profiler hook. For example,
// helper number can be used.
genProfilingLeaveCallback(CORINFO_HELP_PROF_FCN_TAILCALL);
#endif // PROFILING_SUPPORTED
break;
case GT_LCLHEAP:
genLclHeap(treeNode);
break;
case GT_CNS_INT:
#ifdef TARGET_X86
assert(!treeNode->IsIconHandle(GTF_ICON_TLS_HDL));
#endif // TARGET_X86
FALLTHROUGH;
case GT_CNS_DBL:
genSetRegToConst(targetReg, targetType, treeNode);
genProduceReg(treeNode);
break;
case GT_NOT:
case GT_NEG:
genCodeForNegNot(treeNode);
break;
case GT_BSWAP:
case GT_BSWAP16:
genCodeForBswap(treeNode);
break;
case GT_DIV:
if (varTypeIsFloating(treeNode->TypeGet()))
{
genCodeForBinary(treeNode->AsOp());
break;
}
FALLTHROUGH;
case GT_MOD:
case GT_UMOD:
case GT_UDIV:
genCodeForDivMod(treeNode->AsOp());
break;
case GT_OR:
case GT_XOR:
case GT_AND:
assert(varTypeIsIntegralOrI(treeNode));
FALLTHROUGH;
#if !defined(TARGET_64BIT)
case GT_ADD_LO:
case GT_ADD_HI:
case GT_SUB_LO:
case GT_SUB_HI:
#endif // !defined(TARGET_64BIT)
case GT_ADD:
case GT_SUB:
genCodeForBinary(treeNode->AsOp());
break;
case GT_MUL:
if (varTypeIsFloating(treeNode->TypeGet()))
{
genCodeForBinary(treeNode->AsOp());
break;
}
genCodeForMul(treeNode->AsOp());
break;
case GT_LSH:
case GT_RSH:
case GT_RSZ:
case GT_ROL:
case GT_ROR:
genCodeForShift(treeNode);
break;
#if !defined(TARGET_64BIT)
case GT_LSH_HI:
case GT_RSH_LO:
genCodeForShiftLong(treeNode);
break;
#endif // !defined(TARGET_64BIT)
case GT_CAST:
genCodeForCast(treeNode->AsOp());
break;
case GT_BITCAST:
genCodeForBitCast(treeNode->AsOp());
break;
case GT_LCL_FLD_ADDR:
case GT_LCL_VAR_ADDR:
genCodeForLclAddr(treeNode);
break;
case GT_LCL_FLD:
genCodeForLclFld(treeNode->AsLclFld());
break;
case GT_LCL_VAR:
genCodeForLclVar(treeNode->AsLclVar());
break;
case GT_STORE_LCL_FLD:
genCodeForStoreLclFld(treeNode->AsLclFld());
break;
case GT_STORE_LCL_VAR:
genCodeForStoreLclVar(treeNode->AsLclVar());
break;
case GT_RETFILT:
case GT_RETURN:
genReturn(treeNode);
break;
case GT_LEA:
// If we are here, it is the case where there is an LEA that cannot be folded into a parent instruction.
genLeaInstruction(treeNode->AsAddrMode());
break;
case GT_INDEX_ADDR:
genCodeForIndexAddr(treeNode->AsIndexAddr());
break;
case GT_IND:
genCodeForIndir(treeNode->AsIndir());
break;
case GT_INC_SATURATE:
genCodeForIncSaturate(treeNode);
break;
case GT_MULHI:
#ifdef TARGET_X86
case GT_MUL_LONG:
#endif
genCodeForMulHi(treeNode->AsOp());
break;
case GT_INTRINSIC:
genIntrinsic(treeNode);
break;
#ifdef FEATURE_SIMD
case GT_SIMD:
genSIMDIntrinsic(treeNode->AsSIMD());
break;
#endif // FEATURE_SIMD
#ifdef FEATURE_HW_INTRINSICS
case GT_HWINTRINSIC:
genHWIntrinsic(treeNode->AsHWIntrinsic());
break;
#endif // FEATURE_HW_INTRINSICS
case GT_CKFINITE:
genCkfinite(treeNode);
break;
case GT_EQ:
case GT_NE:
case GT_LT:
case GT_LE:
case GT_GE:
case GT_GT:
case GT_TEST_EQ:
case GT_TEST_NE:
case GT_CMP:
genCodeForCompare(treeNode->AsOp());
break;
case GT_JTRUE:
genCodeForJumpTrue(treeNode->AsOp());
break;
case GT_JCC:
genCodeForJcc(treeNode->AsCC());
break;
case GT_SETCC:
genCodeForSetcc(treeNode->AsCC());
break;
case GT_BT:
genCodeForBT(treeNode->AsOp());
break;
case GT_RETURNTRAP:
genCodeForReturnTrap(treeNode->AsOp());
break;
case GT_STOREIND:
genCodeForStoreInd(treeNode->AsStoreInd());
break;
case GT_COPY:
// This is handled at the time we call genConsumeReg() on the GT_COPY
break;
case GT_FIELD_LIST:
// Should always be marked contained.
assert(!"LIST, FIELD_LIST nodes should always be marked contained.");
break;
case GT_SWAP:
genCodeForSwap(treeNode->AsOp());
break;
case GT_PUTARG_STK:
genPutArgStk(treeNode->AsPutArgStk());
break;
case GT_PUTARG_REG:
genPutArgReg(treeNode->AsOp());
break;
case GT_CALL:
genCall(treeNode->AsCall());
break;
case GT_JMP:
genJmpMethod(treeNode);
break;
case GT_LOCKADD:
genCodeForLockAdd(treeNode->AsOp());
break;
case GT_XCHG:
case GT_XADD:
genLockedInstructions(treeNode->AsOp());
break;
case GT_XORR:
case GT_XAND:
NYI("Interlocked.Or and Interlocked.And aren't implemented for x86 yet.");
break;
case GT_MEMORYBARRIER:
{
CodeGen::BarrierKind barrierKind =
treeNode->gtFlags & GTF_MEMORYBARRIER_LOAD ? BARRIER_LOAD_ONLY : BARRIER_FULL;
instGen_MemoryBarrier(barrierKind);
break;
}
case GT_CMPXCHG:
genCodeForCmpXchg(treeNode->AsCmpXchg());
break;
case GT_RELOAD:
// do nothing - reload is just a marker.
// The parent node will call genConsumeReg on this which will trigger the unspill of this node's child
// into the register specified in this node.
break;
case GT_NOP:
break;
case GT_KEEPALIVE:
genConsumeRegs(treeNode->AsOp()->gtOp1);
break;
case GT_NO_OP:
GetEmitter()->emitIns_Nop(1);
break;
case GT_BOUNDS_CHECK:
genRangeCheck(treeNode);
break;
case GT_PHYSREG:
genCodeForPhysReg(treeNode->AsPhysReg());
break;
case GT_NULLCHECK:
genCodeForNullCheck(treeNode->AsIndir());
break;
case GT_CATCH_ARG:
noway_assert(handlerGetsXcptnObj(compiler->compCurBB->bbCatchTyp));
/* Catch arguments get passed in a register. genCodeForBBlist()
would have marked it as holding a GC object, but not used. */
noway_assert(gcInfo.gcRegGCrefSetCur & RBM_EXCEPTION_OBJECT);
genConsumeReg(treeNode);
break;
#if !defined(FEATURE_EH_FUNCLETS)
case GT_END_LFIN:
// Have to clear the ShadowSP of the nesting level which encloses the finally. Generates:
// mov dword ptr [ebp-0xC], 0 // for some slot of the ShadowSP local var
size_t finallyNesting;
finallyNesting = treeNode->AsVal()->gtVal1;
noway_assert(treeNode->AsVal()->gtVal1 < compiler->compHndBBtabCount);
noway_assert(finallyNesting < compiler->compHndBBtabCount);
// The last slot is reserved for ICodeManager::FixContext(ppEndRegion)
unsigned filterEndOffsetSlotOffs;
PREFIX_ASSUME(compiler->lvaLclSize(compiler->lvaShadowSPslotsVar) >
TARGET_POINTER_SIZE); // below doesn't underflow.
filterEndOffsetSlotOffs =
(unsigned)(compiler->lvaLclSize(compiler->lvaShadowSPslotsVar) - TARGET_POINTER_SIZE);
size_t curNestingSlotOffs;
curNestingSlotOffs = filterEndOffsetSlotOffs - ((finallyNesting + 1) * TARGET_POINTER_SIZE);
GetEmitter()->emitIns_S_I(INS_mov, EA_PTRSIZE, compiler->lvaShadowSPslotsVar, (unsigned)curNestingSlotOffs,
0);
break;
#endif // !FEATURE_EH_FUNCLETS
case GT_PINVOKE_PROLOG:
noway_assert(((gcInfo.gcRegGCrefSetCur | gcInfo.gcRegByrefSetCur) & ~fullIntArgRegMask()) == 0);
#ifdef PSEUDORANDOM_NOP_INSERTION
// the runtime side requires the codegen here to be consistent
emit->emitDisableRandomNops();
#endif // PSEUDORANDOM_NOP_INSERTION
break;
case GT_LABEL:
genPendingCallLabel = genCreateTempLabel();
emit->emitIns_R_L(INS_lea, EA_PTR_DSP_RELOC, genPendingCallLabel, treeNode->GetRegNum());
break;
case GT_STORE_OBJ:
case GT_STORE_DYN_BLK:
case GT_STORE_BLK:
genCodeForStoreBlk(treeNode->AsBlk());
break;
case GT_JMPTABLE:
genJumpTable(treeNode);
break;
case GT_SWITCH_TABLE:
genTableBasedSwitch(treeNode);
break;
case GT_ARR_INDEX:
genCodeForArrIndex(treeNode->AsArrIndex());
break;
case GT_ARR_OFFSET:
genCodeForArrOffset(treeNode->AsArrOffs());
break;
case GT_CLS_VAR_ADDR:
emit->emitIns_R_C(INS_lea, EA_PTRSIZE, targetReg, treeNode->AsClsVar()->gtClsVarHnd, 0);
genProduceReg(treeNode);
break;
#if !defined(TARGET_64BIT)
case GT_LONG:
assert(treeNode->isUsedFromReg());
genConsumeRegs(treeNode);
break;
#endif
case GT_IL_OFFSET:
// Do nothing; these nodes are simply markers for debug info.
break;
default:
{
#ifdef DEBUG
char message[256];
_snprintf_s(message, ArrLen(message), _TRUNCATE, "NYI: Unimplemented node type %s\n",
GenTree::OpName(treeNode->OperGet()));
NYIRAW(message);
#endif
assert(!"Unknown node in codegen");
}
break;
}
}
#ifdef FEATURE_SIMD
//----------------------------------------------------------------------------------
// genMultiRegStoreToSIMDLocal: store multi-reg value to a single-reg SIMD local
//
// Arguments:
// lclNode - GenTreeLclVar of GT_STORE_LCL_VAR
//
// Return Value:
// None
//
void CodeGen::genMultiRegStoreToSIMDLocal(GenTreeLclVar* lclNode)
{
assert(varTypeIsSIMD(lclNode));
regNumber dst = lclNode->GetRegNum();
GenTree* op1 = lclNode->gtGetOp1();
GenTree* actualOp1 = op1->gtSkipReloadOrCopy();
unsigned regCount = actualOp1->GetMultiRegCount(compiler);
assert(op1->IsMultiRegNode());
genConsumeRegs(op1);
// Right now the only enregistrable structs supported are SIMD types.
// They are only returned in 1 or 2 registers - the 1 register case is
// handled as a regular STORE_LCL_VAR.
// This case is always a call (AsCall() will assert if it is not).
GenTreeCall* call = actualOp1->AsCall();
const ReturnTypeDesc* retTypeDesc = call->GetReturnTypeDesc();
assert(retTypeDesc->GetReturnRegCount() == MAX_RET_REG_COUNT);
assert(regCount == 2);
regNumber targetReg = lclNode->GetRegNum();
regNumber reg0 = call->GetRegNumByIdx(0);
regNumber reg1 = call->GetRegNumByIdx(1);
if (op1->IsCopyOrReload())
{
// GT_COPY/GT_RELOAD will have valid reg for those positions
// that need to be copied or reloaded.
regNumber reloadReg = op1->AsCopyOrReload()->GetRegNumByIdx(0);
if (reloadReg != REG_NA)
{
reg0 = reloadReg;
}
reloadReg = op1->AsCopyOrReload()->GetRegNumByIdx(1);
if (reloadReg != REG_NA)
{
reg1 = reloadReg;
}
}
#ifdef UNIX_AMD64_ABI
assert(varTypeIsFloating(retTypeDesc->GetReturnRegType(0)));
assert(varTypeIsFloating(retTypeDesc->GetReturnRegType(1)));
// This is a case where the two 8-bytes that comprise the operand are in
// two different xmm registers and need to be assembled into a single
// xmm register.
if (targetReg != reg0 && targetReg != reg1)
{
// targetReg = reg0;
// targetReg[127:64] = reg1[127:64]
inst_Mov(TYP_DOUBLE, targetReg, reg0, /* canSkip */ false);
inst_RV_RV_IV(INS_shufpd, EA_16BYTE, targetReg, reg1, 0x00);
}
else if (targetReg == reg0)
{
// (elided) targetReg = reg0
// targetReg[127:64] = reg1[127:64]
inst_RV_RV_IV(INS_shufpd, EA_16BYTE, targetReg, reg1, 0x00);
}
else
{
assert(targetReg == reg1);
// We need two shuffles to achieve this
// First:
// targetReg[63:0] = targetReg[63:0]
// targetReg[127:64] = reg0[63:0]
//
// Second:
// targetReg[63:0] = targetReg[127:64]
// targetReg[127:64] = targetReg[63:0]
//
// Essentially copy low 8-bytes from reg0 to high 8-bytes of targetReg
// and next swap low and high 8-bytes of targetReg to have them
// rearranged in the right order.
inst_RV_RV_IV(INS_shufpd, EA_16BYTE, targetReg, reg0, 0x00);
inst_RV_RV_IV(INS_shufpd, EA_16BYTE, targetReg, targetReg, 0x01);
}
genProduceReg(lclNode);
#elif defined(TARGET_X86)
if (TargetOS::IsWindows)
{
assert(varTypeIsIntegral(retTypeDesc->GetReturnRegType(0)));
assert(varTypeIsIntegral(retTypeDesc->GetReturnRegType(1)));
assert(lclNode->TypeIs(TYP_SIMD8));
// This is a case where a SIMD8 struct is returned in [EAX, EDX]
// and needs to be assembled into a single xmm register;
// note that we can't check reg0==EAX, reg1==EDX because they could already have been moved.
inst_Mov(TYP_FLOAT, targetReg, reg0, /* canSkip */ false);
const emitAttr size = emitTypeSize(TYP_SIMD8);
if (compiler->compOpportunisticallyDependsOn(InstructionSet_SSE41))
{
GetEmitter()->emitIns_SIMD_R_R_R_I(INS_pinsrd, size, targetReg, targetReg, reg1, 1);
}
else
{
regNumber tempXmm = lclNode->GetSingleTempReg();
assert(tempXmm != targetReg);
inst_Mov(TYP_FLOAT, tempXmm, reg1, /* canSkip */ false);
GetEmitter()->emitIns_SIMD_R_R_R(INS_punpckldq, size, targetReg, targetReg, tempXmm);
}
genProduceReg(lclNode);
}
#elif defined(TARGET_AMD64)
assert(!TargetOS::IsWindows || !"Multireg store to SIMD reg not supported on Windows x64");
#else
#error Unsupported or unset target architecture
#endif
}
#endif // FEATURE_SIMD
//------------------------------------------------------------------------
// genEstablishFramePointer: Set up the frame pointer by adding an offset to the stack pointer.
//
// Arguments:
// delta - the offset to add to the current stack pointer to establish the frame pointer
// reportUnwindData - true if establishing the frame pointer should be reported in the OS unwind data.
//
void CodeGen::genEstablishFramePointer(int delta, bool reportUnwindData)
{
assert(compiler->compGeneratingProlog);
if (delta == 0)
{
GetEmitter()->emitIns_Mov(INS_mov, EA_PTRSIZE, REG_FPBASE, REG_SPBASE, /* canSkip */ false);
#ifdef USING_SCOPE_INFO
psiMoveESPtoEBP();
#endif // USING_SCOPE_INFO
}
else
{
GetEmitter()->emitIns_R_AR(INS_lea, EA_PTRSIZE, REG_FPBASE, REG_SPBASE, delta);
// We don't update prolog scope info (there is no function to handle lea), but that is currently dead code
// anyway.
}
if (reportUnwindData)
{
compiler->unwindSetFrameReg(REG_FPBASE, delta);
}
}
//------------------------------------------------------------------------
// genAllocLclFrame: Probe the stack and allocate the local stack frame - subtract from SP.
//
// Arguments:
// frameSize - the size of the stack frame being allocated.
// initReg - register to use as a scratch register.
// pInitRegZeroed - OUT parameter. *pInitRegZeroed is set to 'false' if and only if
// this call sets 'initReg' to a non-zero value.
// maskArgRegsLiveIn - incoming argument registers that are currently live.
//
// Return value:
// None
//
void CodeGen::genAllocLclFrame(unsigned frameSize, regNumber initReg, bool* pInitRegZeroed, regMaskTP maskArgRegsLiveIn)
{
assert(compiler->compGeneratingProlog);
if (frameSize == 0)
{
return;
}
const target_size_t pageSize = compiler->eeGetPageSize();
if (frameSize == REGSIZE_BYTES)
{
// Frame size is the same as register size.
GetEmitter()->emitIns_R(INS_push, EA_PTRSIZE, REG_EAX);
compiler->unwindAllocStack(frameSize);
}
else if (frameSize < pageSize)
{
GetEmitter()->emitIns_R_I(INS_sub, EA_PTRSIZE, REG_SPBASE, frameSize);
compiler->unwindAllocStack(frameSize);
const unsigned lastProbedLocToFinalSp = frameSize;
if (lastProbedLocToFinalSp + STACK_PROBE_BOUNDARY_THRESHOLD_BYTES > pageSize)
{
// We have left almost a complete page unprobed. If the next action on the stack might subtract from SP
// first, before touching the current SP, then we need to probe at the very bottom. This can
// happen on x86, for example, when we copy an argument to the stack using a "SUB ESP; REP MOV"
// strategy.
GetEmitter()->emitIns_R_AR(INS_test, EA_4BYTE, REG_EAX, REG_SPBASE, 0);
}
}
else
{
#ifdef TARGET_X86
int spOffset = -(int)frameSize;
if (compiler->info.compPublishStubParam)
{
GetEmitter()->emitIns_R(INS_push, EA_PTRSIZE, REG_SECRET_STUB_PARAM);
spOffset += REGSIZE_BYTES;
}
GetEmitter()->emitIns_R_AR(INS_lea, EA_PTRSIZE, REG_STACK_PROBE_HELPER_ARG, REG_SPBASE, spOffset);
regSet.verifyRegUsed(REG_STACK_PROBE_HELPER_ARG);
genEmitHelperCall(CORINFO_HELP_STACK_PROBE, 0, EA_UNKNOWN);
if (compiler->info.compPublishStubParam)
{
GetEmitter()->emitIns_R(INS_pop, EA_PTRSIZE, REG_SECRET_STUB_PARAM);
GetEmitter()->emitIns_R_I(INS_sub, EA_PTRSIZE, REG_SPBASE, frameSize);
}
else
{
GetEmitter()->emitIns_Mov(INS_mov, EA_PTRSIZE, REG_SPBASE, REG_STACK_PROBE_HELPER_ARG, /* canSkip */ false);
}
#else // !TARGET_X86
static_assert_no_msg((RBM_STACK_PROBE_HELPER_ARG & (RBM_SECRET_STUB_PARAM | RBM_DEFAULT_HELPER_CALL_TARGET)) ==
RBM_NONE);
GetEmitter()->emitIns_R_AR(INS_lea, EA_PTRSIZE, REG_STACK_PROBE_HELPER_ARG, REG_SPBASE, -(int)frameSize);
regSet.verifyRegUsed(REG_STACK_PROBE_HELPER_ARG);
genEmitHelperCall(CORINFO_HELP_STACK_PROBE, 0, EA_UNKNOWN);
if (initReg == REG_DEFAULT_HELPER_CALL_TARGET)
{
*pInitRegZeroed = false;
}
static_assert_no_msg((RBM_STACK_PROBE_HELPER_TRASH & RBM_STACK_PROBE_HELPER_ARG) == RBM_NONE);
GetEmitter()->emitIns_Mov(INS_mov, EA_PTRSIZE, REG_SPBASE, REG_STACK_PROBE_HELPER_ARG, /* canSkip */ false);
#endif // !TARGET_X86
compiler->unwindAllocStack(frameSize);
if (initReg == REG_STACK_PROBE_HELPER_ARG)
{
*pInitRegZeroed = false;
}
}
#ifdef USING_SCOPE_INFO
if (!doubleAlignOrFramePointerUsed())
{
psiAdjustStackLevel(frameSize);
}
#endif // USING_SCOPE_INFO
}
//------------------------------------------------------------------------
// genStackPointerConstantAdjustment: add a specified constant value to the stack pointer.
// No probe is done.
//
// Arguments:
// spDelta - the value to add to SP. Must be negative.
// regTmp - x86 only: an available temporary register. If not REG_NA, hide the SP
// adjustment from the emitter, using this register.
//
// Return Value:
// None.
//
void CodeGen::genStackPointerConstantAdjustment(ssize_t spDelta, regNumber regTmp)
{
assert(spDelta < 0);
// We assert that the SP change is less than one page. If it's greater, you should have called a
// function that does a probe, which will in turn call this function.
assert((target_size_t)(-spDelta) <= compiler->eeGetPageSize());
#ifdef TARGET_X86
if (regTmp != REG_NA)
{
// For x86, some cases don't want to use "sub ESP" because we don't want the emitter to track the adjustment
// to ESP. So do the work in the count register.
// TODO-CQ: manipulate ESP directly, to share code, reduce #ifdefs, and improve CQ. This would require
// creating a way to temporarily turn off the emitter's tracking of ESP, maybe marking instrDescs as "don't
// track".
inst_Mov(TYP_I_IMPL, regTmp, REG_SPBASE, /* canSkip */ false);
inst_RV_IV(INS_sub, regTmp, (target_ssize_t)-spDelta, EA_PTRSIZE);
inst_Mov(TYP_I_IMPL, REG_SPBASE, regTmp, /* canSkip */ false);
}
else
#endif // TARGET_X86
{
inst_RV_IV(INS_sub, REG_SPBASE, (target_ssize_t)-spDelta, EA_PTRSIZE);
}
}
//------------------------------------------------------------------------
// genStackPointerConstantAdjustmentWithProbe: add a specified constant value to the stack pointer,
// and probe the stack as appropriate. Should only be called as a helper for
// genStackPointerConstantAdjustmentLoopWithProbe.
//
// Arguments:
// spDelta - the value to add to SP. Must be negative or zero. If zero, the probe happens,
// but the stack pointer doesn't move.
// regTmp - x86 only: an available temporary register. If not REG_NA, hide the SP
// adjustment from the emitter, using this register.
//
// Return Value:
// None.
//
void CodeGen::genStackPointerConstantAdjustmentWithProbe(ssize_t spDelta, regNumber regTmp)
{
GetEmitter()->emitIns_AR_R(INS_TEST, EA_4BYTE, REG_SPBASE, REG_SPBASE, 0);
genStackPointerConstantAdjustment(spDelta, regTmp);
}
//------------------------------------------------------------------------
// genStackPointerConstantAdjustmentLoopWithProbe: Add a specified constant value to the stack pointer,
// and probe the stack as appropriate. Generates one probe per page, up to the total amount required.
// This will generate a sequence of probes in-line. It is required for the case where we need to expose
// (not hide) the stack level adjustment. We can't use the dynamic loop in that case, because the total
// stack adjustment would not be visible to the emitter. It would be possible to use this version for
// multiple hidden constant stack level adjustments but we don't do that currently (we use the loop
// version in genStackPointerDynamicAdjustmentWithProbe instead).
//
// Arguments:
// spDelta - the value to add to SP. Must be negative.
// regTmp - x86 only: an available temporary register. If not REG_NA, hide the SP
// adjustment from the emitter, using this register.
//
// Return Value:
// Offset in bytes from SP to last probed address.
//
target_ssize_t CodeGen::genStackPointerConstantAdjustmentLoopWithProbe(ssize_t spDelta, regNumber regTmp)
{
assert(spDelta < 0);
const target_size_t pageSize = compiler->eeGetPageSize();
ssize_t spRemainingDelta = spDelta;
do
{
ssize_t spOneDelta = -(ssize_t)min((target_size_t)-spRemainingDelta, pageSize);
genStackPointerConstantAdjustmentWithProbe(spOneDelta, regTmp);
spRemainingDelta -= spOneDelta;
} while (spRemainingDelta < 0);
// What offset from the final SP was the last probe? This depends on the fact that
// genStackPointerConstantAdjustmentWithProbe() probes first, then does "SUB SP".
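// For example, spDelta = -0x1800 with a 0x1000 byte page leaves the last probe 0x800 bytes above the final SP.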
target_size_t lastTouchDelta = (target_size_t)(-spDelta) % pageSize;
if ((lastTouchDelta == 0) || (lastTouchDelta + STACK_PROBE_BOUNDARY_THRESHOLD_BYTES > pageSize))
{
// We have left almost a complete page unprobed. If lastTouchDelta==0, then spDelta was an exact
// multiple of pageSize, which means we last probed exactly one page back. Otherwise, we probed
// the page, but very far from the end. If the next action on the stack might subtract from SP
// first, before touching the current SP, then we do one more probe at the very bottom. This can
// happen on x86, for example, when we copy an argument to the stack using a "SUB ESP; REP MOV"
// strategy.
GetEmitter()->emitIns_AR_R(INS_test, EA_PTRSIZE, REG_EAX, REG_SPBASE, 0);
lastTouchDelta = 0;
}
return lastTouchDelta;
}
//------------------------------------------------------------------------
// genStackPointerDynamicAdjustmentWithProbe: add a register value to the stack pointer,
// and probe the stack as appropriate.
//
// Note that for x86, we hide the ESP adjustment from the emitter. To do that, currently,
// requires a temporary register and extra code.
//
// Arguments:
// regSpDelta - the register value to add to SP. The value in this register must be negative.
// This register might be trashed.
// regTmp - an available temporary register. Will be trashed.
//
// Return Value:
// None.
//
void CodeGen::genStackPointerDynamicAdjustmentWithProbe(regNumber regSpDelta, regNumber regTmp)
{
assert(regSpDelta != REG_NA);
assert(regTmp != REG_NA);
// Tickle the pages to ensure that ESP is always valid and is
// in sync with the "stack guard page". Note that in the worst
// case ESP is on the last byte of the guard page. Thus you must
// touch ESP-0 first not ESP-0x1000.
//
// Another subtlety is that you don't want ESP to be exactly on the
// boundary of the guard page because PUSH is predecrement, thus
// call setup would not touch the guard page but just beyond it.
//
// Note that we go through a few hoops so that ESP never points to
// illegal pages at any time during the tickling process
//
// add regSpDelta, ESP // reg now holds ultimate ESP
// jb loop // result is smaller than original ESP (no wrap around)
// xor regSpDelta, regSpDelta // Overflow, pick lowest possible number
// loop:
// test ESP, [ESP+0] // tickle the page
// mov regTmp, ESP
// sub regTmp, eeGetPageSize()
// mov ESP, regTmp
// cmp ESP, regSpDelta
// jae loop
// mov ESP, regSpDelta
BasicBlock* loop = genCreateTempLabel();
inst_RV_RV(INS_add, regSpDelta, REG_SPBASE, TYP_I_IMPL);
inst_JMP(EJ_jb, loop);
instGen_Set_Reg_To_Zero(EA_PTRSIZE, regSpDelta);
genDefineTempLabel(loop);
// Tickle the decremented value. Note that it must be done BEFORE the update of ESP since ESP might already
// be on the guard page. It is OK to leave the final value of ESP on the guard page.
GetEmitter()->emitIns_AR_R(INS_TEST, EA_4BYTE, REG_SPBASE, REG_SPBASE, 0);
// Subtract a page from ESP. This is a trick to avoid the emitter trying to track the
// decrement of the ESP - we do the subtraction in another reg instead of adjusting ESP directly.
inst_Mov(TYP_I_IMPL, regTmp, REG_SPBASE, /* canSkip */ false);
inst_RV_IV(INS_sub, regTmp, compiler->eeGetPageSize(), EA_PTRSIZE);
inst_Mov(TYP_I_IMPL, REG_SPBASE, regTmp, /* canSkip */ false);
inst_RV_RV(INS_cmp, REG_SPBASE, regSpDelta, TYP_I_IMPL);
inst_JMP(EJ_jae, loop);
// Move the final value to ESP
inst_Mov(TYP_I_IMPL, REG_SPBASE, regSpDelta, /* canSkip */ false);
}
//------------------------------------------------------------------------
// genLclHeap: Generate code for localloc.
//
// Arguments:
// tree - the localloc tree to generate.
//
// Notes:
// Note that for x86, we don't track ESP movements while generating the localloc code.
// The ESP tracking is used to report stack pointer-relative GC info, which is not
// interesting while doing the localloc construction. Also, for functions with localloc,
// we have EBP frames, and EBP-relative locals, and ESP-relative accesses only for function
// call arguments.
//
// For x86, we store the ESP after the localloc is complete in the LocAllocSP
// variable. This variable is implicitly reported to the VM in the GC info (its position
// is defined by convention relative to other items), and is used by the GC to find the
// "base" stack pointer in functions with localloc.
//
void CodeGen::genLclHeap(GenTree* tree)
{
assert(tree->OperGet() == GT_LCLHEAP);
assert(compiler->compLocallocUsed);
GenTree* size = tree->AsOp()->gtOp1;
noway_assert((genActualType(size->gtType) == TYP_INT) || (genActualType(size->gtType) == TYP_I_IMPL));
regNumber targetReg = tree->GetRegNum();
regNumber regCnt = REG_NA;
var_types type = genActualType(size->gtType);
emitAttr easz = emitTypeSize(type);
BasicBlock* endLabel = nullptr;
target_ssize_t lastTouchDelta = (target_ssize_t)-1;
#ifdef DEBUG
genStackPointerCheck(compiler->opts.compStackCheckOnRet, compiler->lvaReturnSpCheck);
#endif
noway_assert(isFramePointerUsed()); // localloc requires Frame Pointer to be established since SP changes
noway_assert(genStackLevel == 0); // Can't have anything on the stack
target_size_t stackAdjustment = 0;
target_size_t locAllocStackOffset = 0;
// compute the amount of memory to allocate to properly STACK_ALIGN.
size_t amount = 0;
if (size->IsCnsIntOrI())
{
// If size is a constant, then it must be contained.
assert(size->isContained());
// If amount is zero then return null in targetReg
amount = size->AsIntCon()->gtIconVal;
if (amount == 0)
{
instGen_Set_Reg_To_Zero(EA_PTRSIZE, targetReg);
goto BAILOUT;
}
// 'amount' is the total number of bytes to localloc to properly STACK_ALIGN
amount = AlignUp(amount, STACK_ALIGN);
}
else
{
// The localloc requested memory size is non-constant.
// Put the size value in targetReg. If it is zero, bail out by returning null in targetReg.
genConsumeRegAndCopy(size, targetReg);
endLabel = genCreateTempLabel();
GetEmitter()->emitIns_R_R(INS_test, easz, targetReg, targetReg);
inst_JMP(EJ_je, endLabel);
// Compute the size of the block to allocate and perform alignment.
// If compInitMem=true, we can reuse targetReg as regcnt,
// since we don't need any internal registers.
if (compiler->info.compInitMem)
{
assert(tree->AvailableTempRegCount() == 0);
regCnt = targetReg;
}
else
{
regCnt = tree->ExtractTempReg();
// Above, we put the size in targetReg. Now, copy it to our new temp register if necessary.
inst_Mov(size->TypeGet(), regCnt, targetReg, /* canSkip */ true);
}
// Round up the number of bytes to allocate to a STACK_ALIGN boundary. This is done
// by code like:
// add reg, 15
// and reg, -16
// However, in the initialized memory case, we need the count of STACK_ALIGN-sized
// elements, not a byte count, after the alignment. So instead of the "and", which
// becomes unnecessary, generate a shift, e.g.:
// add reg, 15
// shr reg, 4
inst_RV_IV(INS_add, regCnt, STACK_ALIGN - 1, emitActualTypeSize(type));
if (compiler->info.compInitMem)
{
// Convert the count from a count of bytes to a loop count. We will loop once per
// stack alignment size, so each loop will zero 4 bytes on Windows/x86, and 16 bytes
// on x64 and Linux/x86.
//
// Note that we zero a single reg-size word per iteration on x86, and 2 reg-size
// words per iteration on x64. We will shift off all the stack alignment bits
// added above, so there is no need for an 'and' instruction.
// --- shr regCnt, 2 (or 4) ---
inst_RV_SH(INS_SHIFT_RIGHT_LOGICAL, EA_PTRSIZE, regCnt, STACK_ALIGN_SHIFT);
}
else
{
// Otherwise, mask off the low bits to align the byte count.
inst_RV_IV(INS_AND, regCnt, ~(STACK_ALIGN - 1), emitActualTypeSize(type));
}
}
bool initMemOrLargeAlloc; // Declaration must be separate from initialization to avoid clang compiler error.
initMemOrLargeAlloc = compiler->info.compInitMem || (amount >= compiler->eeGetPageSize()); // must be >= not >
#if FEATURE_FIXED_OUT_ARGS
// If we have an outgoing arg area then we must adjust the SP by popping off the
// outgoing arg area. We will restore it right before we return from this method.
//
// Localloc returns stack space that is aligned to STACK_ALIGN bytes. The following
// are the cases that need to be handled:
// i) Method has out-going arg area.
// It is guaranteed that size of out-going arg area is STACK_ALIGN'ed (see fgMorphArgs).
// Therefore, we will pop off the out-going arg area from RSP before allocating the localloc space.
// ii) Method has no out-going arg area.
// Nothing to pop off from the stack.
if (compiler->lvaOutgoingArgSpaceSize > 0)
{
assert((compiler->lvaOutgoingArgSpaceSize % STACK_ALIGN) == 0); // This must be true for the stack to remain
// aligned
// If the localloc amount is a small enough constant, and we're not initializing the allocated
// memory, then don't bother popping off the outgoing arg space first; just allocate the amount
// of space needed by the allocation, and call the bottom part the new outgoing arg space.
if ((amount > 0) && !initMemOrLargeAlloc)
{
lastTouchDelta = genStackPointerConstantAdjustmentLoopWithProbe(-(ssize_t)amount, REG_NA);
stackAdjustment = 0;
locAllocStackOffset = (target_size_t)compiler->lvaOutgoingArgSpaceSize;
goto ALLOC_DONE;
}
inst_RV_IV(INS_add, REG_SPBASE, compiler->lvaOutgoingArgSpaceSize, EA_PTRSIZE);
stackAdjustment += (target_size_t)compiler->lvaOutgoingArgSpaceSize;
locAllocStackOffset = stackAdjustment;
}
#endif
if (size->IsCnsIntOrI())
{
// We should reach here only for non-zero, constant size allocations.
assert(amount > 0);
assert((amount % STACK_ALIGN) == 0);
assert((amount % REGSIZE_BYTES) == 0);
        // For small allocations, we will generate up to six 'push 0' instructions inline
size_t cntRegSizedWords = amount / REGSIZE_BYTES;
if (compiler->info.compInitMem && (cntRegSizedWords <= 6))
{
for (; cntRegSizedWords != 0; cntRegSizedWords--)
{
inst_IV(INS_push_hide, 0); // push_hide means don't track the stack
}
lastTouchDelta = 0;
goto ALLOC_DONE;
}
#ifdef TARGET_X86
bool needRegCntRegister = true;
#else // !TARGET_X86
bool needRegCntRegister = initMemOrLargeAlloc;
#endif // !TARGET_X86
if (needRegCntRegister)
{
// If compInitMem=true, we can reuse targetReg as regcnt.
// Since size is a constant, regCnt is not yet initialized.
assert(regCnt == REG_NA);
if (compiler->info.compInitMem)
{
assert(tree->AvailableTempRegCount() == 0);
regCnt = targetReg;
}
else
{
regCnt = tree->ExtractTempReg();
}
}
if (!initMemOrLargeAlloc)
{
// Since the size is less than a page, and we don't need to zero init memory, simply adjust ESP.
// ESP might already be in the guard page, so we must touch it BEFORE
// the alloc, not after.
assert(amount < compiler->eeGetPageSize()); // must be < not <=
lastTouchDelta = genStackPointerConstantAdjustmentLoopWithProbe(-(ssize_t)amount, regCnt);
goto ALLOC_DONE;
}
// else, "mov regCnt, amount"
if (compiler->info.compInitMem)
{
// When initializing memory, we want 'amount' to be the loop count.
assert((amount % STACK_ALIGN) == 0);
amount /= STACK_ALIGN;
}
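        // Load the (possibly adjusted) amount into regCnt, using a 4-byte immediate when the value fits in 32 bits.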
instGen_Set_Reg_To_Imm(((size_t)(int)amount == amount) ? EA_4BYTE : EA_8BYTE, regCnt, amount);
}
if (compiler->info.compInitMem)
{
// At this point 'regCnt' is set to the number of loop iterations for this loop, if each
// iteration zeros (and subtracts from the stack pointer) STACK_ALIGN bytes.
// Since we have to zero out the allocated memory AND ensure that RSP is always valid
// by tickling the pages, we will just push 0's on the stack.
assert(genIsValidIntReg(regCnt));
// Loop:
BasicBlock* loop = genCreateTempLabel();
genDefineTempLabel(loop);
static_assert_no_msg((STACK_ALIGN % REGSIZE_BYTES) == 0);
unsigned const count = (STACK_ALIGN / REGSIZE_BYTES);
for (unsigned i = 0; i < count; i++)
{
inst_IV(INS_push_hide, 0); // --- push REG_SIZE bytes of 0
}
// Note that the stack must always be aligned to STACK_ALIGN bytes
// Decrement the loop counter and loop if not done.
inst_RV(INS_dec, regCnt, TYP_I_IMPL);
inst_JMP(EJ_jne, loop);
lastTouchDelta = 0;
}
else
{
// At this point 'regCnt' is set to the total number of bytes to localloc.
// Negate this value before calling the function to adjust the stack (which
// adds to ESP).
inst_RV(INS_NEG, regCnt, TYP_I_IMPL);
regNumber regTmp = tree->GetSingleTempReg();
genStackPointerDynamicAdjustmentWithProbe(regCnt, regTmp);
// lastTouchDelta is dynamic, and can be up to a page. So if we have outgoing arg space,
// we're going to assume the worst and probe.
}
ALLOC_DONE:
// Re-adjust SP to allocate out-going arg area. Note: this also requires probes, if we have
// a very large stack adjustment! For simplicity, we use the same function used elsewhere,
// which probes the current address before subtracting. We may end up probing multiple
// times relatively "nearby".
if (stackAdjustment > 0)
{
assert((stackAdjustment % STACK_ALIGN) == 0); // This must be true for the stack to remain aligned
assert(lastTouchDelta >= -1);
if ((lastTouchDelta == (target_ssize_t)-1) ||
(stackAdjustment + (target_size_t)lastTouchDelta + STACK_PROBE_BOUNDARY_THRESHOLD_BYTES >
compiler->eeGetPageSize()))
{
genStackPointerConstantAdjustmentLoopWithProbe(-(ssize_t)stackAdjustment, REG_NA);
}
else
{
genStackPointerConstantAdjustment(-(ssize_t)stackAdjustment, REG_NA);
}
}
// Return the stackalloc'ed address in result register.
// TargetReg = RSP + locAllocStackOffset
GetEmitter()->emitIns_R_AR(INS_lea, EA_PTRSIZE, targetReg, REG_SPBASE, (int)locAllocStackOffset);
if (endLabel != nullptr)
{
genDefineTempLabel(endLabel);
}
BAILOUT:
#ifdef JIT32_GCENCODER
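    // Record the new stack pointer in the special local used by the JIT32 GC encoder to track the localloc'd region.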
if (compiler->lvaLocAllocSPvar != BAD_VAR_NUM)
{
GetEmitter()->emitIns_S_R(ins_Store(TYP_I_IMPL), EA_PTRSIZE, REG_SPBASE, compiler->lvaLocAllocSPvar, 0);
}
#endif // JIT32_GCENCODER
#ifdef DEBUG
// Update local variable to reflect the new stack pointer.
if (compiler->opts.compStackCheckOnRet)
{
noway_assert(compiler->lvaReturnSpCheck != 0xCCCCCCCC &&
compiler->lvaGetDesc(compiler->lvaReturnSpCheck)->lvDoNotEnregister &&
compiler->lvaGetDesc(compiler->lvaReturnSpCheck)->lvOnFrame);
GetEmitter()->emitIns_S_R(ins_Store(TYP_I_IMPL), EA_PTRSIZE, REG_SPBASE, compiler->lvaReturnSpCheck, 0);
}
#endif
genProduceReg(tree);
}
void CodeGen::genCodeForStoreBlk(GenTreeBlk* storeBlkNode)
{
assert(storeBlkNode->OperIs(GT_STORE_OBJ, GT_STORE_DYN_BLK, GT_STORE_BLK));
if (storeBlkNode->OperIs(GT_STORE_OBJ))
{
#ifndef JIT32_GCENCODER
assert(!storeBlkNode->gtBlkOpGcUnsafe);
#endif
assert(storeBlkNode->OperIsCopyBlkOp());
assert(storeBlkNode->AsObj()->GetLayout()->HasGCPtr());
genCodeForCpObj(storeBlkNode->AsObj());
return;
}
bool isCopyBlk = storeBlkNode->OperIsCopyBlkOp();
switch (storeBlkNode->gtBlkOpKind)
{
#ifdef TARGET_AMD64
case GenTreeBlk::BlkOpKindHelper:
assert(!storeBlkNode->gtBlkOpGcUnsafe);
if (isCopyBlk)
{
genCodeForCpBlkHelper(storeBlkNode);
}
else
{
genCodeForInitBlkHelper(storeBlkNode);
}
break;
#endif // TARGET_AMD64
case GenTreeBlk::BlkOpKindRepInstr:
#ifndef JIT32_GCENCODER
assert(!storeBlkNode->gtBlkOpGcUnsafe);
#endif
if (isCopyBlk)
{
genCodeForCpBlkRepMovs(storeBlkNode);
}
else
{
genCodeForInitBlkRepStos(storeBlkNode);
}
break;
case GenTreeBlk::BlkOpKindUnroll:
if (isCopyBlk)
{
#ifndef JIT32_GCENCODER
if (storeBlkNode->gtBlkOpGcUnsafe)
{
GetEmitter()->emitDisableGC();
}
#endif
genCodeForCpBlkUnroll(storeBlkNode);
#ifndef JIT32_GCENCODER
if (storeBlkNode->gtBlkOpGcUnsafe)
{
GetEmitter()->emitEnableGC();
}
#endif
}
else
{
#ifndef JIT32_GCENCODER
assert(!storeBlkNode->gtBlkOpGcUnsafe);
#endif
genCodeForInitBlkUnroll(storeBlkNode);
}
break;
default:
unreached();
}
}
//
//------------------------------------------------------------------------
// genCodeForInitBlkRepStos: Generate code for InitBlk using rep stos.
//
// Arguments:
// initBlkNode - The Block store for which we are generating code.
//
void CodeGen::genCodeForInitBlkRepStos(GenTreeBlk* initBlkNode)
{
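    // rep stosb fills RCX bytes at [RDI] with the byte in AL; genConsumeBlockOp places the operands accordingly.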
genConsumeBlockOp(initBlkNode, REG_RDI, REG_RAX, REG_RCX);
instGen(INS_r_stosb);
}
//----------------------------------------------------------------------------------
// genCodeForInitBlkUnroll: Generate unrolled block initialization code.
//
// Arguments:
// node - the GT_STORE_BLK node to generate code for
//
void CodeGen::genCodeForInitBlkUnroll(GenTreeBlk* node)
{
assert(node->OperIs(GT_STORE_BLK));
unsigned dstLclNum = BAD_VAR_NUM;
regNumber dstAddrBaseReg = REG_NA;
regNumber dstAddrIndexReg = REG_NA;
unsigned dstAddrIndexScale = 1;
int dstOffset = 0;
GenTree* dstAddr = node->Addr();
if (!dstAddr->isContained())
{
dstAddrBaseReg = genConsumeReg(dstAddr);
}
else if (dstAddr->OperIsAddrMode())
{
GenTreeAddrMode* addrMode = dstAddr->AsAddrMode();
if (addrMode->HasBase())
{
dstAddrBaseReg = genConsumeReg(addrMode->Base());
}
if (addrMode->HasIndex())
{
dstAddrIndexReg = genConsumeReg(addrMode->Index());
dstAddrIndexScale = addrMode->GetScale();
}
dstOffset = addrMode->Offset();
}
else
{
assert(dstAddr->OperIsLocalAddr());
dstLclNum = dstAddr->AsLclVarCommon()->GetLclNum();
dstOffset = dstAddr->AsLclVarCommon()->GetLclOffs();
}
regNumber srcIntReg = REG_NA;
GenTree* src = node->Data();
if (src->OperIs(GT_INIT_VAL))
{
assert(src->isContained());
src = src->AsUnOp()->gtGetOp1();
}
unsigned size = node->GetLayout()->GetSize();
// An SSE mov that accesses data larger than 8 bytes may be implemented using
// multiple memory accesses. Hence, the JIT must not use such stores when
// INITBLK zeroes a struct that contains GC pointers and can be observed by
// other threads (i.e. when dstAddr is not an address of a local).
// For example, this can happen when initializing a struct field of an object.
const bool canUse16BytesSimdMov = !node->IsOnHeapAndContainsReferences();
#ifdef TARGET_AMD64
// On Amd64 the JIT will not use SIMD stores for such structs and instead
// will always allocate a GP register for src node.
const bool willUseSimdMov = canUse16BytesSimdMov && (size >= XMM_REGSIZE_BYTES);
#else
    // On X86 the JIT will use movq for structs that are 16 bytes or larger
// since it is more beneficial than using two mov-s from a GP register.
const bool willUseSimdMov = (size >= 16);
#endif
if (!src->isContained())
{
srcIntReg = genConsumeReg(src);
}
else
{
// If src is contained then it must be 0.
assert(src->IsIntegralConst(0));
assert(willUseSimdMov);
#ifdef TARGET_AMD64
assert(size >= XMM_REGSIZE_BYTES);
#else
assert(size % 8 == 0);
#endif
}
emitter* emit = GetEmitter();
assert(size <= INT32_MAX);
assert(dstOffset < (INT32_MAX - static_cast<int>(size)));
if (willUseSimdMov)
{
regNumber srcXmmReg = node->GetSingleTempReg(RBM_ALLFLOAT);
unsigned regSize = (size >= YMM_REGSIZE_BYTES) && compiler->compOpportunisticallyDependsOn(InstructionSet_AVX)
? YMM_REGSIZE_BYTES
: XMM_REGSIZE_BYTES;
if (src->gtSkipReloadOrCopy()->IsIntegralConst(0))
{
// If the source is constant 0 then always use xorps, it's faster
// than copying the constant from a GPR to a XMM register.
emit->emitIns_R_R(INS_xorps, EA_ATTR(regSize), srcXmmReg, srcXmmReg);
}
else
{
emit->emitIns_Mov(INS_movd, EA_PTRSIZE, srcXmmReg, srcIntReg, /* canSkip */ false);
emit->emitIns_R_R(INS_punpckldq, EA_16BYTE, srcXmmReg, srcXmmReg);
#ifdef TARGET_X86
// For x86, we need one more to convert it from 8 bytes to 16 bytes.
emit->emitIns_R_R(INS_punpckldq, EA_16BYTE, srcXmmReg, srcXmmReg);
#endif
if (regSize == YMM_REGSIZE_BYTES)
{
// Extend the bytes in the lower lanes to the upper lanes
emit->emitIns_R_R_R_I(INS_vinsertf128, EA_32BYTE, srcXmmReg, srcXmmReg, srcXmmReg, 1);
}
}
instruction simdMov = simdUnalignedMovIns();
unsigned bytesWritten = 0;
while (bytesWritten < size)
{
#ifdef TARGET_X86
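            // On x86, fall back to 8-byte movq stores when a 16-byte store is either unsafe (the destination is
            // on the heap and contains GC references) or would write past the end of the block.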
if (!canUse16BytesSimdMov || (bytesWritten + regSize > size))
{
simdMov = INS_movq;
regSize = 8;
}
#endif
if (bytesWritten + regSize > size)
{
assert(srcIntReg != REG_NA);
break;
}
if (dstLclNum != BAD_VAR_NUM)
{
emit->emitIns_S_R(simdMov, EA_ATTR(regSize), srcXmmReg, dstLclNum, dstOffset);
}
else
{
emit->emitIns_ARX_R(simdMov, EA_ATTR(regSize), srcXmmReg, dstAddrBaseReg, dstAddrIndexReg,
dstAddrIndexScale, dstOffset);
}
dstOffset += regSize;
bytesWritten += regSize;
if (regSize == YMM_REGSIZE_BYTES && size - bytesWritten < YMM_REGSIZE_BYTES)
{
regSize = XMM_REGSIZE_BYTES;
}
}
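        // Anything not covered by the SIMD stores above is left for the scalar stores below.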
size -= bytesWritten;
}
// Fill the remainder using normal stores.
#ifdef TARGET_AMD64
unsigned regSize = REGSIZE_BYTES;
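    // Shrink regSize to the largest power of two that does not exceed the remaining byte count.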
while (regSize > size)
{
regSize /= 2;
}
for (; size > regSize; size -= regSize, dstOffset += regSize)
{
if (dstLclNum != BAD_VAR_NUM)
{
emit->emitIns_S_R(INS_mov, EA_ATTR(regSize), srcIntReg, dstLclNum, dstOffset);
}
else
{
emit->emitIns_ARX_R(INS_mov, EA_ATTR(regSize), srcIntReg, dstAddrBaseReg, dstAddrIndexReg,
dstAddrIndexScale, dstOffset);
}
}
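    // Write the final partial chunk with a full regSize store that overlaps the previously written bytes,
    // by backing the destination offset up by the overlap amount.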
if (size > 0)
{
unsigned shiftBack = regSize - size;
assert(shiftBack <= regSize);
dstOffset -= shiftBack;
if (dstLclNum != BAD_VAR_NUM)
{
emit->emitIns_S_R(INS_mov, EA_ATTR(regSize), srcIntReg, dstLclNum, dstOffset);
}
else
{
emit->emitIns_ARX_R(INS_mov, EA_ATTR(regSize), srcIntReg, dstAddrBaseReg, dstAddrIndexReg,
dstAddrIndexScale, dstOffset);
}
}
#else // TARGET_X86
for (unsigned regSize = REGSIZE_BYTES; size > 0; size -= regSize, dstOffset += regSize)
{
while (regSize > size)
{
regSize /= 2;
}
if (dstLclNum != BAD_VAR_NUM)
{
emit->emitIns_S_R(INS_mov, EA_ATTR(regSize), srcIntReg, dstLclNum, dstOffset);
}
else
{
emit->emitIns_ARX_R(INS_mov, EA_ATTR(regSize), srcIntReg, dstAddrBaseReg, dstAddrIndexReg,
dstAddrIndexScale, dstOffset);
}
}
#endif
}
#ifdef TARGET_AMD64
//------------------------------------------------------------------------
// genCodeForInitBlkHelper - Generate code for an InitBlk node by means of the VM memset helper call
//
// Arguments:
// initBlkNode - the GT_STORE_[BLK|OBJ|DYN_BLK]
//
// Preconditions:
// The register assignments have been set appropriately.
// This is validated by genConsumeBlockOp().
//
void CodeGen::genCodeForInitBlkHelper(GenTreeBlk* initBlkNode)
{
    // Destination address goes in arg0, the fill value goes in arg1, and size goes in arg2.
// genConsumeBlockOp takes care of this for us.
genConsumeBlockOp(initBlkNode, REG_ARG_0, REG_ARG_1, REG_ARG_2);
genEmitHelperCall(CORINFO_HELP_MEMSET, 0, EA_UNKNOWN);
}
#endif // TARGET_AMD64
#ifdef FEATURE_PUT_STRUCT_ARG_STK
// Generate code for a load from some address + offset
// baseNode: tree node which can be either a local address or arbitrary node
// offset: distance from the baseNode from which to load
void CodeGen::genCodeForLoadOffset(instruction ins, emitAttr size, regNumber dst, GenTree* baseNode, unsigned offset)
{
emitter* emit = GetEmitter();
if (baseNode->OperIsLocalAddr())
{
const GenTreeLclVarCommon* lclVar = baseNode->AsLclVarCommon();
offset += lclVar->GetLclOffs();
emit->emitIns_R_S(ins, size, dst, lclVar->GetLclNum(), offset);
}
else
{
emit->emitIns_R_AR(ins, size, dst, baseNode->GetRegNum(), offset);
}
}
#endif // FEATURE_PUT_STRUCT_ARG_STK
//----------------------------------------------------------------------------------
// genCodeForCpBlkUnroll - Generate unrolled block copy code.
//
// Arguments:
// node - the GT_STORE_BLK node to generate code for
//
void CodeGen::genCodeForCpBlkUnroll(GenTreeBlk* node)
{
assert(node->OperIs(GT_STORE_BLK));
unsigned dstLclNum = BAD_VAR_NUM;
regNumber dstAddrBaseReg = REG_NA;
regNumber dstAddrIndexReg = REG_NA;
unsigned dstAddrIndexScale = 1;
int dstOffset = 0;
GenTree* dstAddr = node->Addr();
if (!dstAddr->isContained())
{
dstAddrBaseReg = genConsumeReg(dstAddr);
}
else if (dstAddr->OperIsAddrMode())
{
GenTreeAddrMode* addrMode = dstAddr->AsAddrMode();
if (addrMode->HasBase())
{
dstAddrBaseReg = genConsumeReg(addrMode->Base());
}
if (addrMode->HasIndex())
{
dstAddrIndexReg = genConsumeReg(addrMode->Index());
dstAddrIndexScale = addrMode->GetScale();
}
dstOffset = addrMode->Offset();
}
else
{
assert(dstAddr->OperIsLocalAddr());
const GenTreeLclVarCommon* lclVar = dstAddr->AsLclVarCommon();
dstLclNum = lclVar->GetLclNum();
dstOffset = lclVar->GetLclOffs();
}
unsigned srcLclNum = BAD_VAR_NUM;
regNumber srcAddrBaseReg = REG_NA;
regNumber srcAddrIndexReg = REG_NA;
unsigned srcAddrIndexScale = 1;
int srcOffset = 0;
GenTree* src = node->Data();
assert(src->isContained());
if (src->OperIs(GT_LCL_VAR, GT_LCL_FLD))
{
srcLclNum = src->AsLclVarCommon()->GetLclNum();
srcOffset = src->AsLclVarCommon()->GetLclOffs();
}
else
{
assert(src->OperIs(GT_IND));
GenTree* srcAddr = src->AsIndir()->Addr();
if (!srcAddr->isContained())
{
srcAddrBaseReg = genConsumeReg(srcAddr);
}
else if (srcAddr->OperIsAddrMode())
{
GenTreeAddrMode* addrMode = srcAddr->AsAddrMode();
if (addrMode->HasBase())
{
srcAddrBaseReg = genConsumeReg(addrMode->Base());
}
if (addrMode->HasIndex())
{
srcAddrIndexReg = genConsumeReg(addrMode->Index());
srcAddrIndexScale = addrMode->GetScale();
}
srcOffset = addrMode->Offset();
}
else
{
assert(srcAddr->OperIsLocalAddr());
srcLclNum = srcAddr->AsLclVarCommon()->GetLclNum();
srcOffset = srcAddr->AsLclVarCommon()->GetLclOffs();
}
}
emitter* emit = GetEmitter();
unsigned size = node->GetLayout()->GetSize();
assert(size <= INT32_MAX);
assert(srcOffset < (INT32_MAX - static_cast<int>(size)));
assert(dstOffset < (INT32_MAX - static_cast<int>(size)));
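    // Copy as much as possible with SIMD registers, starting with the widest size available.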
if (size >= XMM_REGSIZE_BYTES)
{
regNumber tempReg = node->GetSingleTempReg(RBM_ALLFLOAT);
instruction simdMov = simdUnalignedMovIns();
// Get the largest SIMD register available if the size is large enough
unsigned regSize = (size >= YMM_REGSIZE_BYTES) && compiler->compOpportunisticallyDependsOn(InstructionSet_AVX)
? YMM_REGSIZE_BYTES
: XMM_REGSIZE_BYTES;
while (size >= regSize)
{
for (; size >= regSize; size -= regSize, srcOffset += regSize, dstOffset += regSize)
{
if (srcLclNum != BAD_VAR_NUM)
{
emit->emitIns_R_S(simdMov, EA_ATTR(regSize), tempReg, srcLclNum, srcOffset);
}
else
{
emit->emitIns_R_ARX(simdMov, EA_ATTR(regSize), tempReg, srcAddrBaseReg, srcAddrIndexReg,
srcAddrIndexScale, srcOffset);
}
if (dstLclNum != BAD_VAR_NUM)
{
emit->emitIns_S_R(simdMov, EA_ATTR(regSize), tempReg, dstLclNum, dstOffset);
}
else
{
emit->emitIns_ARX_R(simdMov, EA_ATTR(regSize), tempReg, dstAddrBaseReg, dstAddrIndexReg,
dstAddrIndexScale, dstOffset);
}
}
            // The remaining size no longer fits a YMM move; step down to XMM size to finish the SIMD copies.
if (regSize == YMM_REGSIZE_BYTES)
{
regSize = XMM_REGSIZE_BYTES;
}
}
}
// Fill the remainder with normal loads/stores
if (size > 0)
{
regNumber tempReg = node->GetSingleTempReg(RBM_ALLINT);
#ifdef TARGET_AMD64
unsigned regSize = REGSIZE_BYTES;
while (regSize > size)
{
regSize /= 2;
}
for (; size > regSize; size -= regSize, srcOffset += regSize, dstOffset += regSize)
{
if (srcLclNum != BAD_VAR_NUM)
{
emit->emitIns_R_S(INS_mov, EA_ATTR(regSize), tempReg, srcLclNum, srcOffset);
}
else
{
emit->emitIns_R_ARX(INS_mov, EA_ATTR(regSize), tempReg, srcAddrBaseReg, srcAddrIndexReg,
srcAddrIndexScale, srcOffset);
}
if (dstLclNum != BAD_VAR_NUM)
{
emit->emitIns_S_R(INS_mov, EA_ATTR(regSize), tempReg, dstLclNum, dstOffset);
}
else
{
emit->emitIns_ARX_R(INS_mov, EA_ATTR(regSize), tempReg, dstAddrBaseReg, dstAddrIndexReg,
dstAddrIndexScale, dstOffset);
}
}
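        // Copy the final partial chunk with a full regSize load/store pair that overlaps the previously copied
        // bytes, by backing both offsets up by the overlap amount.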
if (size > 0)
{
unsigned shiftBack = regSize - size;
assert(shiftBack <= regSize);
srcOffset -= shiftBack;
dstOffset -= shiftBack;
if (srcLclNum != BAD_VAR_NUM)
{
emit->emitIns_R_S(INS_mov, EA_ATTR(regSize), tempReg, srcLclNum, srcOffset);
}
else
{
emit->emitIns_R_ARX(INS_mov, EA_ATTR(regSize), tempReg, srcAddrBaseReg, srcAddrIndexReg,
srcAddrIndexScale, srcOffset);
}
if (dstLclNum != BAD_VAR_NUM)
{
emit->emitIns_S_R(INS_mov, EA_ATTR(regSize), tempReg, dstLclNum, dstOffset);
}
else
{
emit->emitIns_ARX_R(INS_mov, EA_ATTR(regSize), tempReg, dstAddrBaseReg, dstAddrIndexReg,
dstAddrIndexScale, dstOffset);
}
}
#else // TARGET_X86
for (unsigned regSize = REGSIZE_BYTES; size > 0; size -= regSize, srcOffset += regSize, dstOffset += regSize)
{
while (regSize > size)
{
regSize /= 2;
}
if (srcLclNum != BAD_VAR_NUM)
{
emit->emitIns_R_S(INS_mov, EA_ATTR(regSize), tempReg, srcLclNum, srcOffset);
}
else
{
emit->emitIns_R_ARX(INS_mov, EA_ATTR(regSize), tempReg, srcAddrBaseReg, srcAddrIndexReg,
srcAddrIndexScale, srcOffset);
}
if (dstLclNum != BAD_VAR_NUM)
{
emit->emitIns_S_R(INS_mov, EA_ATTR(regSize), tempReg, dstLclNum, dstOffset);
}
else
{
emit->emitIns_ARX_R(INS_mov, EA_ATTR(regSize), tempReg, dstAddrBaseReg, dstAddrIndexReg,
dstAddrIndexScale, dstOffset);
}
}
#endif
}
}
//----------------------------------------------------------------------------------
// genCodeForCpBlkRepMovs - Generate code for CpBlk by using rep movs
//
// Arguments:
// cpBlkNode - the GT_STORE_[BLK|OBJ|DYN_BLK]
//
// Preconditions:
// The register assignments have been set appropriately.
// This is validated by genConsumeBlockOp().
//
void CodeGen::genCodeForCpBlkRepMovs(GenTreeBlk* cpBlkNode)
{
    // Destination address goes in RDI, source address goes in RSI, and size goes in RCX.
// genConsumeBlockOp takes care of this for us.
genConsumeBlockOp(cpBlkNode, REG_RDI, REG_RSI, REG_RCX);
instGen(INS_r_movsb);
}
#ifdef FEATURE_PUT_STRUCT_ARG_STK
//------------------------------------------------------------------------
// CodeGen::genMove8IfNeeded: Conditionally move 8 bytes of a struct to the argument area
//
// Arguments:
// size - The size of bytes remaining to be moved
// longTmpReg - The tmp register to be used for the long value
// srcAddr - The address of the source struct
// offset - The current offset being copied
//
// Return Value:
// Returns the number of bytes moved (8 or 0).
//
// Notes:
// This is used in the PutArgStkKindUnroll case, to move any bytes that are
// not an even multiple of 16.
// On x86, longTmpReg must be an xmm reg; on x64 it must be an integer register.
// This is checked by genStoreRegToStackArg.
//
unsigned CodeGen::genMove8IfNeeded(unsigned size, regNumber longTmpReg, GenTree* srcAddr, unsigned offset)
{
#ifdef TARGET_X86
instruction longMovIns = INS_movq;
#else // !TARGET_X86
instruction longMovIns = INS_mov;
#endif // !TARGET_X86
if ((size & 8) != 0)
{
genCodeForLoadOffset(longMovIns, EA_8BYTE, longTmpReg, srcAddr, offset);
genStoreRegToStackArg(TYP_LONG, longTmpReg, offset);
return 8;
}
return 0;
}
//------------------------------------------------------------------------
// CodeGen::genMove4IfNeeded: Conditionally move 4 bytes of a struct to the argument area
//
// Arguments:
// size - The size of bytes remaining to be moved
// intTmpReg - The tmp register to be used for the long value
// srcAddr - The address of the source struct
// offset - The current offset being copied
//
// Return Value:
// Returns the number of bytes moved (4 or 0).
//
// Notes:
// This is used in the PutArgStkKindUnroll case, to move any bytes that are
// not an even multiple of 16.
// intTmpReg must be an integer register.
// This is checked by genStoreRegToStackArg.
//
unsigned CodeGen::genMove4IfNeeded(unsigned size, regNumber intTmpReg, GenTree* srcAddr, unsigned offset)
{
if ((size & 4) != 0)
{
genCodeForLoadOffset(INS_mov, EA_4BYTE, intTmpReg, srcAddr, offset);
genStoreRegToStackArg(TYP_INT, intTmpReg, offset);
return 4;
}
return 0;
}
//------------------------------------------------------------------------
// CodeGen::genMove2IfNeeded: Conditionally move 2 bytes of a struct to the argument area
//
// Arguments:
// size - The size of bytes remaining to be moved
// intTmpReg - The tmp register to be used for the long value
// srcAddr - The address of the source struct
// offset - The current offset being copied
//
// Return Value:
// Returns the number of bytes moved (2 or 0).
//
// Notes:
// This is used in the PutArgStkKindUnroll case, to move any bytes that are
// not an even multiple of 16.
// intTmpReg must be an integer register.
// This is checked by genStoreRegToStackArg.
//
unsigned CodeGen::genMove2IfNeeded(unsigned size, regNumber intTmpReg, GenTree* srcAddr, unsigned offset)
{
if ((size & 2) != 0)
{
genCodeForLoadOffset(INS_mov, EA_2BYTE, intTmpReg, srcAddr, offset);
genStoreRegToStackArg(TYP_SHORT, intTmpReg, offset);
return 2;
}
return 0;
}
//------------------------------------------------------------------------
// CodeGen::genMove1IfNeeded: Conditionally move 1 byte of a struct to the argument area
//
// Arguments:
// size - The size of bytes remaining to be moved
// intTmpReg - The tmp register to be used for the long value
// srcAddr - The address of the source struct
// offset - The current offset being copied
//
// Return Value:
// Returns the number of bytes moved (1 or 0).
//
// Notes:
// This is used in the PutArgStkKindUnroll case, to move any bytes that are
// not an even multiple of 16.
// intTmpReg must be an integer register.
// This is checked by genStoreRegToStackArg.
//
unsigned CodeGen::genMove1IfNeeded(unsigned size, regNumber intTmpReg, GenTree* srcAddr, unsigned offset)
{
if ((size & 1) != 0)
{
genCodeForLoadOffset(INS_mov, EA_1BYTE, intTmpReg, srcAddr, offset);
genStoreRegToStackArg(TYP_BYTE, intTmpReg, offset);
return 1;
}
return 0;
}
//---------------------------------------------------------------------------------------------------------------//
// genStructPutArgUnroll: Generates code for passing a struct arg on stack by value using loop unrolling.
//
// Arguments:
// putArgNode - the PutArgStk tree.
//
// Notes:
// m_stkArgVarNum must be set to the base var number, relative to which the by-val struct will be copied to the
// stack.
//
// TODO-Amd64-Unix: Try to share code with copyblk.
// Need refactoring of copyblk before it could be used for putarg_stk.
// The difference for now is that a putarg_stk contains its children, while cpyblk does not.
// This creates differences in code. After some significant refactoring it could be reused.
//
void CodeGen::genStructPutArgUnroll(GenTreePutArgStk* putArgNode)
{
GenTree* src = putArgNode->AsOp()->gtOp1;
// We will never call this method for SIMD types, which are stored directly
// in genPutStructArgStk().
assert(src->isContained() && src->OperIs(GT_OBJ) && src->TypeIs(TYP_STRUCT));
assert(!src->AsObj()->GetLayout()->HasGCPtr());
#ifdef TARGET_X86
assert(!m_pushStkArg);
#endif
unsigned size = putArgNode->GetStackByteSize();
#ifdef TARGET_X86
assert((XMM_REGSIZE_BYTES <= size) && (size <= CPBLK_UNROLL_LIMIT));
#else // !TARGET_X86
assert(size <= CPBLK_UNROLL_LIMIT);
#endif // !TARGET_X86
if (src->AsOp()->gtOp1->isUsedFromReg())
{
genConsumeReg(src->AsOp()->gtOp1);
}
unsigned offset = 0;
regNumber xmmTmpReg = REG_NA;
regNumber intTmpReg = REG_NA;
regNumber longTmpReg = REG_NA;
if (size >= XMM_REGSIZE_BYTES)
{
xmmTmpReg = putArgNode->GetSingleTempReg(RBM_ALLFLOAT);
}
if ((size % XMM_REGSIZE_BYTES) != 0)
{
intTmpReg = putArgNode->GetSingleTempReg(RBM_ALLINT);
}
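    // On x86 an 8-byte move must go through an XMM register (movq); on x64 a GPR move suffices.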
#ifdef TARGET_X86
longTmpReg = xmmTmpReg;
#else
longTmpReg = intTmpReg;
#endif
    // Let's use SSE2 to be able to do 16 bytes at a time with loads and stores.
size_t slots = size / XMM_REGSIZE_BYTES;
while (slots-- > 0)
{
// TODO: In the below code the load and store instructions are for 16 bytes, but the
// type is EA_8BYTE. The movdqa/u are 16 byte instructions, so it works, but
// this probably needs to be changed.
// Load
genCodeForLoadOffset(INS_movdqu, EA_8BYTE, xmmTmpReg, src->gtGetOp1(), offset);
// Store
genStoreRegToStackArg(TYP_STRUCT, xmmTmpReg, offset);
offset += XMM_REGSIZE_BYTES;
}
// Fill the remainder (15 bytes or less) if there's one.
if ((size % XMM_REGSIZE_BYTES) != 0)
{
offset += genMove8IfNeeded(size, longTmpReg, src->AsOp()->gtOp1, offset);
offset += genMove4IfNeeded(size, intTmpReg, src->AsOp()->gtOp1, offset);
offset += genMove2IfNeeded(size, intTmpReg, src->AsOp()->gtOp1, offset);
offset += genMove1IfNeeded(size, intTmpReg, src->AsOp()->gtOp1, offset);
assert(offset == size);
}
}
//------------------------------------------------------------------------
// genStructPutArgRepMovs: Generates code for passing a struct arg by value on stack using Rep Movs.
//
// Arguments:
// putArgNode - the PutArgStk tree.
//
// Preconditions:
// m_stkArgVarNum must be set to the base var number, relative to which the by-val struct bits will go.
//
void CodeGen::genStructPutArgRepMovs(GenTreePutArgStk* putArgNode)
{
GenTree* src = putArgNode->gtGetOp1();
assert(src->TypeGet() == TYP_STRUCT);
assert(!src->AsObj()->GetLayout()->HasGCPtr());
// Make sure we got the arguments of the cpblk operation in the right registers, and that
// 'src' is contained as expected.
assert(putArgNode->gtRsvdRegs == (RBM_RDI | RBM_RCX | RBM_RSI));
assert(src->isContained());
genConsumePutStructArgStk(putArgNode, REG_RDI, REG_RSI, REG_RCX);
instGen(INS_r_movsb);
}
#ifdef TARGET_X86
//------------------------------------------------------------------------
// genStructPutArgPush: Generates code for passing a struct arg by value on stack using "push".
//
// Arguments:
// putArgNode - the PutArgStk tree.
//
// Notes:
// Used only on x86, in two cases:
// - Structs 4, 8, or 12 bytes in size (less than XMM_REGSIZE_BYTES, multiple of TARGET_POINTER_SIZE).
// - Structs that contain GC pointers - they are guaranteed to be sized correctly by the VM.
//
void CodeGen::genStructPutArgPush(GenTreePutArgStk* putArgNode)
{
// On x86, any struct that contains GC references must be stored to the stack using `push` instructions so
// that the emitter properly detects the need to update the method's GC information.
//
// Strictly speaking, it is only necessary to use "push" to store the GC references themselves, so for structs
// with large numbers of consecutive non-GC-ref-typed fields, we may be able to improve the code size in the
// future.
assert(m_pushStkArg);
GenTree* src = putArgNode->Data();
GenTree* srcAddr = putArgNode->Data()->AsObj()->Addr();
regNumber srcAddrReg = srcAddr->GetRegNum();
const bool srcAddrInReg = srcAddrReg != REG_NA;
unsigned srcLclNum = 0;
unsigned srcLclOffset = 0;
if (srcAddrInReg)
{
srcAddrReg = genConsumeReg(srcAddr);
}
else
{
assert(srcAddr->OperIsLocalAddr());
srcLclNum = srcAddr->AsLclVarCommon()->GetLclNum();
srcLclOffset = srcAddr->AsLclVarCommon()->GetLclOffs();
}
ClassLayout* layout = src->AsObj()->GetLayout();
const unsigned byteSize = putArgNode->GetStackByteSize();
assert((byteSize % TARGET_POINTER_SIZE == 0) && ((byteSize < XMM_REGSIZE_BYTES) || layout->HasGCPtr()));
const unsigned numSlots = byteSize / TARGET_POINTER_SIZE;
assert(putArgNode->gtNumSlots == numSlots);
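    // Push the slots in reverse order: the stack grows downward, so the last push (slot 0) ends up at the lowest
    // address and the struct is laid out in the correct order.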
for (int i = numSlots - 1; i >= 0; --i)
{
emitAttr slotAttr = emitTypeSize(layout->GetGCPtrType(i));
const unsigned byteOffset = i * TARGET_POINTER_SIZE;
if (srcAddrInReg)
{
GetEmitter()->emitIns_AR_R(INS_push, slotAttr, REG_NA, srcAddrReg, byteOffset);
}
else
{
GetEmitter()->emitIns_S(INS_push, slotAttr, srcLclNum, srcLclOffset + byteOffset);
}
AddStackLevel(TARGET_POINTER_SIZE);
}
}
#endif // TARGET_X86
#ifndef TARGET_X86
//------------------------------------------------------------------------
// genStructPutArgPartialRepMovs: Generates code for passing a struct arg by value on stack using
// a mix of pointer-sized stores, "movsq" and "rep movsd".
//
// Arguments:
// putArgNode - the PutArgStk tree.
//
// Notes:
// Used on non-x86 targets (Unix x64) for structs with GC pointers.
//
void CodeGen::genStructPutArgPartialRepMovs(GenTreePutArgStk* putArgNode)
{
// Consume these registers.
// They may now contain gc pointers (depending on their type; gcMarkRegPtrVal will "do the right thing").
genConsumePutStructArgStk(putArgNode, REG_RDI, REG_RSI, REG_NA);
GenTreeObj* src = putArgNode->gtGetOp1()->AsObj();
ClassLayout* layout = src->GetLayout();
const bool srcIsLocal = src->Addr()->OperIsLocalAddr();
const emitAttr srcAddrAttr = srcIsLocal ? EA_PTRSIZE : EA_BYREF;
#if DEBUG
unsigned numGCSlotsCopied = 0;
#endif // DEBUG
assert(layout->HasGCPtr());
const unsigned byteSize = putArgNode->GetStackByteSize();
assert(byteSize % TARGET_POINTER_SIZE == 0);
const unsigned numSlots = byteSize / TARGET_POINTER_SIZE;
assert(putArgNode->gtNumSlots == numSlots);
// No need to disable GC the way COPYOBJ does. Here the refs are copied in atomic operations always.
for (unsigned i = 0; i < numSlots;)
{
if (!layout->IsGCPtr(i))
{
// Let's see if we can use rep movsp (alias for movsd or movsq for 32 and 64 bits respectively)
// instead of a sequence of movsp instructions to save cycles and code size.
unsigned adjacentNonGCSlotCount = 0;
do
{
adjacentNonGCSlotCount++;
i++;
} while ((i < numSlots) && !layout->IsGCPtr(i));
// If we have a very small contiguous non-ref region, it's better just to
// emit a sequence of movsp instructions
if (adjacentNonGCSlotCount < CPOBJ_NONGC_SLOTS_LIMIT)
{
for (; adjacentNonGCSlotCount > 0; adjacentNonGCSlotCount--)
{
instGen(INS_movsp);
}
}
else
{
GetEmitter()->emitIns_R_I(INS_mov, EA_4BYTE, REG_RCX, adjacentNonGCSlotCount);
instGen(INS_r_movsp);
}
}
else
{
// We have a GC (byref or ref) pointer
// TODO-Amd64-Unix: Here a better solution (for code size and CQ) would be to use movsp instruction,
// but the logic for emitting a GC info record is not available (it is internal for the emitter
// only.) See emitGCVarLiveUpd function. If we could call it separately, we could do
// instGen(INS_movsp); and emission of gc info.
var_types memType = layout->GetGCPtrType(i);
GetEmitter()->emitIns_R_AR(ins_Load(memType), emitTypeSize(memType), REG_RCX, REG_RSI, 0);
genStoreRegToStackArg(memType, REG_RCX, i * TARGET_POINTER_SIZE);
#ifdef DEBUG
numGCSlotsCopied++;
#endif // DEBUG
i++;
if (i < numSlots)
{
// Source for the copy operation.
// If a LocalAddr, use EA_PTRSIZE - copy from stack.
// If not a LocalAddr, use EA_BYREF - the source location is not on the stack.
GetEmitter()->emitIns_R_I(INS_add, srcAddrAttr, REG_RSI, TARGET_POINTER_SIZE);
// Always copying to the stack - outgoing arg area
// (or the outgoing arg area of the caller for a tail call) - use EA_PTRSIZE.
GetEmitter()->emitIns_R_I(INS_add, EA_PTRSIZE, REG_RDI, TARGET_POINTER_SIZE);
}
}
}
assert(numGCSlotsCopied == layout->GetGCPtrCount());
}
#endif // !TARGET_X86
//------------------------------------------------------------------------
// If any Vector3 args are on the stack and they are not pass-by-ref, the upper 32 bits
// must be cleared to zero. The native compiler doesn't clear the upper bits
// and there is no way to know if the caller is native or not. So, the upper
// 32 bits of a Vector3 argument on the stack are always cleared to zero.
#if defined(UNIX_AMD64_ABI) && defined(FEATURE_SIMD)
void CodeGen::genClearStackVec3ArgUpperBits()
{
#ifdef DEBUG
if (verbose)
{
printf("*************** In genClearStackVec3ArgUpperBits()\n");
}
#endif
assert(compiler->compGeneratingProlog);
    for (unsigned varNum = 0; varNum < compiler->info.compArgsCount; varNum++)
{
const LclVarDsc* varDsc = compiler->lvaGetDesc(varNum);
assert(varDsc->lvIsParam);
        // Does the var have a SIMD12 type?
if (varDsc->lvType != TYP_SIMD12)
{
continue;
}
if (!varDsc->lvIsRegArg)
{
// Clear the upper 32 bits by mov dword ptr [V_ARG_BASE+0xC], 0
GetEmitter()->emitIns_S_I(ins_Store(TYP_INT), EA_4BYTE, varNum, genTypeSize(TYP_FLOAT) * 3, 0);
}
else
{
// Assume that for x64 linux, an argument is fully in registers
// or fully on stack.
regNumber argReg = varDsc->GetOtherArgReg();
// Clear the upper 32 bits by two shift instructions.
// argReg = argReg << 96
GetEmitter()->emitIns_R_I(INS_pslldq, emitActualTypeSize(TYP_SIMD12), argReg, 12);
// argReg = argReg >> 96
GetEmitter()->emitIns_R_I(INS_psrldq, emitActualTypeSize(TYP_SIMD12), argReg, 12);
}
}
}
#endif // defined(UNIX_AMD64_ABI) && defined(FEATURE_SIMD)
#endif // FEATURE_PUT_STRUCT_ARG_STK
//----------------------------------------------------------------------------------
// genCodeForCpObj - Generate code for CpObj nodes to copy structs that have interleaved
// GC pointers.
//
// Arguments:
// cpObjNode - the GT_STORE_OBJ
//
// Notes:
//    This will generate a sequence of movsp instructions for the cases of non-gc members,
//    and calls to the CORINFO_HELP_ASSIGN_BYREF helper otherwise.
//    Note that movsp is an alias for movsd on x86 and movsq on x64.
//
// Preconditions:
// The register assignments have been set appropriately.
// This is validated by genConsumeBlockOp().
//
void CodeGen::genCodeForCpObj(GenTreeObj* cpObjNode)
{
// Make sure we got the arguments of the cpobj operation in the right registers
GenTree* dstAddr = cpObjNode->Addr();
GenTree* source = cpObjNode->Data();
GenTree* srcAddr = nullptr;
var_types srcAddrType = TYP_BYREF;
bool dstOnStack = dstAddr->gtSkipReloadOrCopy()->OperIsLocalAddr();
#ifdef DEBUG
// If the GenTree node has data about GC pointers, this means we're dealing
// with CpObj, so this requires special logic.
assert(cpObjNode->GetLayout()->HasGCPtr());
// MovSp (alias for movsq on x64 and movsd on x86) instruction is used for copying non-gcref fields
// and it needs src = RSI and dst = RDI.
// Either these registers must not contain lclVars, or they must be dying or marked for spill.
// This is because these registers are incremented as we go through the struct.
if (!source->IsLocal())
{
assert(source->gtOper == GT_IND);
srcAddr = source->gtGetOp1();
GenTree* actualSrcAddr = srcAddr->gtSkipReloadOrCopy();
GenTree* actualDstAddr = dstAddr->gtSkipReloadOrCopy();
unsigned srcLclVarNum = BAD_VAR_NUM;
unsigned dstLclVarNum = BAD_VAR_NUM;
bool isSrcAddrLiveOut = false;
bool isDstAddrLiveOut = false;
if (genIsRegCandidateLocal(actualSrcAddr))
{
srcLclVarNum = actualSrcAddr->AsLclVarCommon()->GetLclNum();
isSrcAddrLiveOut = ((actualSrcAddr->gtFlags & (GTF_VAR_DEATH | GTF_SPILL)) == 0);
}
if (genIsRegCandidateLocal(actualDstAddr))
{
dstLclVarNum = actualDstAddr->AsLclVarCommon()->GetLclNum();
isDstAddrLiveOut = ((actualDstAddr->gtFlags & (GTF_VAR_DEATH | GTF_SPILL)) == 0);
}
assert((actualSrcAddr->GetRegNum() != REG_RSI) || !isSrcAddrLiveOut ||
((srcLclVarNum == dstLclVarNum) && !isDstAddrLiveOut));
assert((actualDstAddr->GetRegNum() != REG_RDI) || !isDstAddrLiveOut ||
((srcLclVarNum == dstLclVarNum) && !isSrcAddrLiveOut));
srcAddrType = srcAddr->TypeGet();
}
#endif // DEBUG
// Consume the operands and get them into the right registers.
// They may now contain gc pointers (depending on their type; gcMarkRegPtrVal will "do the right thing").
genConsumeBlockOp(cpObjNode, REG_RDI, REG_RSI, REG_NA);
gcInfo.gcMarkRegPtrVal(REG_RSI, srcAddrType);
gcInfo.gcMarkRegPtrVal(REG_RDI, dstAddr->TypeGet());
unsigned slots = cpObjNode->GetLayout()->GetSlotCount();
// If we can prove it's on the stack we don't need to use the write barrier.
if (dstOnStack)
{
if (slots >= CPOBJ_NONGC_SLOTS_LIMIT)
{
// If the destination of the CpObj is on the stack, make sure we allocated
// RCX to emit the movsp (alias for movsd or movsq for 32 and 64 bits respectively).
assert((cpObjNode->gtRsvdRegs & RBM_RCX) != 0);
GetEmitter()->emitIns_R_I(INS_mov, EA_4BYTE, REG_RCX, slots);
instGen(INS_r_movsp);
}
else
{
// For small structs, it's better to emit a sequence of movsp than to
// emit a rep movsp instruction.
while (slots > 0)
{
instGen(INS_movsp);
slots--;
}
}
}
else
{
ClassLayout* layout = cpObjNode->GetLayout();
unsigned gcPtrCount = layout->GetGCPtrCount();
unsigned i = 0;
while (i < slots)
{
if (!layout->IsGCPtr(i))
{
// Let's see if we can use rep movsp instead of a sequence of movsp instructions
// to save cycles and code size.
unsigned nonGcSlotCount = 0;
do
{
nonGcSlotCount++;
i++;
} while ((i < slots) && !layout->IsGCPtr(i));
// If we have a very small contiguous non-gc region, it's better just to
// emit a sequence of movsp instructions
if (nonGcSlotCount < CPOBJ_NONGC_SLOTS_LIMIT)
{
while (nonGcSlotCount > 0)
{
instGen(INS_movsp);
nonGcSlotCount--;
}
}
else
{
// Otherwise, we can save code-size and improve CQ by emitting
// rep movsp (alias for movsd/movsq for x86/x64)
assert((cpObjNode->gtRsvdRegs & RBM_RCX) != 0);
GetEmitter()->emitIns_R_I(INS_mov, EA_4BYTE, REG_RCX, nonGcSlotCount);
instGen(INS_r_movsp);
}
}
else
{
genEmitHelperCall(CORINFO_HELP_ASSIGN_BYREF, 0, EA_PTRSIZE);
gcPtrCount--;
i++;
}
}
assert(gcPtrCount == 0);
}
// Clear the gcInfo for RSI and RDI.
// While we normally update GC info prior to the last instruction that uses them,
// these actually live into the helper call.
gcInfo.gcMarkRegSetNpt(RBM_RSI);
gcInfo.gcMarkRegSetNpt(RBM_RDI);
}
#ifdef TARGET_AMD64
//----------------------------------------------------------------------------------
// genCodeForCpBlkHelper - Generate code for a CpBlk node by means of the VM memcpy helper call
//
// Arguments:
// cpBlkNode - the GT_STORE_[BLK|OBJ|DYN_BLK]
//
// Preconditions:
// The register assignments have been set appropriately.
// This is validated by genConsumeBlockOp().
//
void CodeGen::genCodeForCpBlkHelper(GenTreeBlk* cpBlkNode)
{
// Destination address goes in arg0, source address goes in arg1, and size goes in arg2.
// genConsumeBlockOp takes care of this for us.
genConsumeBlockOp(cpBlkNode, REG_ARG_0, REG_ARG_1, REG_ARG_2);
genEmitHelperCall(CORINFO_HELP_MEMCPY, 0, EA_UNKNOWN);
}
#endif // TARGET_AMD64
// Generate code for a switch statement based on a table of ip-relative offsets
void CodeGen::genTableBasedSwitch(GenTree* treeNode)
{
genConsumeOperands(treeNode->AsOp());
regNumber idxReg = treeNode->AsOp()->gtOp1->GetRegNum();
regNumber baseReg = treeNode->AsOp()->gtOp2->GetRegNum();
regNumber tmpReg = treeNode->GetSingleTempReg();
// load the ip-relative offset (which is relative to start of fgFirstBB)
GetEmitter()->emitIns_R_ARX(INS_mov, EA_4BYTE, baseReg, baseReg, idxReg, 4, 0);
// add it to the absolute address of fgFirstBB
GetEmitter()->emitIns_R_L(INS_lea, EA_PTR_DSP_RELOC, compiler->fgFirstBB, tmpReg);
GetEmitter()->emitIns_R_R(INS_add, EA_PTRSIZE, baseReg, tmpReg);
// jmp baseReg
GetEmitter()->emitIns_R(INS_i_jmp, emitTypeSize(TYP_I_IMPL), baseReg);
}
// emits the table and an instruction to get the address of the first element
void CodeGen::genJumpTable(GenTree* treeNode)
{
noway_assert(compiler->compCurBB->bbJumpKind == BBJ_SWITCH);
assert(treeNode->OperGet() == GT_JMPTABLE);
unsigned jumpCount = compiler->compCurBB->bbJumpSwt->bbsCount;
BasicBlock** jumpTable = compiler->compCurBB->bbJumpSwt->bbsDstTab;
unsigned jmpTabOffs;
unsigned jmpTabBase;
jmpTabBase = GetEmitter()->emitBBTableDataGenBeg(jumpCount, true);
jmpTabOffs = 0;
JITDUMP("\n J_M%03u_DS%02u LABEL DWORD\n", compiler->compMethodID, jmpTabBase);
for (unsigned i = 0; i < jumpCount; i++)
{
BasicBlock* target = *jumpTable++;
noway_assert(target->bbFlags & BBF_HAS_LABEL);
JITDUMP(" DD L_M%03u_" FMT_BB "\n", compiler->compMethodID, target->bbNum);
GetEmitter()->emitDataGenData(i, target);
    }
GetEmitter()->emitDataGenEnd();
// Access to inline data is 'abstracted' by a special type of static member
// (produced by eeFindJitDataOffs) which the emitter recognizes as being a reference
// to constant data, not a real static field.
GetEmitter()->emitIns_R_C(INS_lea, emitTypeSize(TYP_I_IMPL), treeNode->GetRegNum(),
compiler->eeFindJitDataOffs(jmpTabBase), 0);
genProduceReg(treeNode);
}
//------------------------------------------------------------------------
// genCodeForLockAdd: Generate code for a GT_LOCKADD node
//
// Arguments:
// node - the GT_LOCKADD node
//
void CodeGen::genCodeForLockAdd(GenTreeOp* node)
{
assert(node->OperIs(GT_LOCKADD));
GenTree* addr = node->gtGetOp1();
GenTree* data = node->gtGetOp2();
emitAttr size = emitActualTypeSize(data->TypeGet());
assert(addr->isUsedFromReg());
assert(data->isUsedFromReg() || data->isContainedIntOrIImmed());
assert((size == EA_4BYTE) || (size == EA_PTRSIZE));
genConsumeOperands(node);
instGen(INS_lock);
if (data->isContainedIntOrIImmed())
{
int imm = static_cast<int>(data->AsIntCon()->IconValue());
assert(imm == data->AsIntCon()->IconValue());
GetEmitter()->emitIns_I_AR(INS_add, size, imm, addr->GetRegNum(), 0);
}
else
{
GetEmitter()->emitIns_AR_R(INS_add, size, data->GetRegNum(), addr->GetRegNum(), 0);
}
}
//------------------------------------------------------------------------
// genLockedInstructions: Generate code for a GT_XADD or GT_XCHG node.
//
// Arguments:
// node - the GT_XADD/XCHG node
//
void CodeGen::genLockedInstructions(GenTreeOp* node)
{
assert(node->OperIs(GT_XADD, GT_XCHG));
GenTree* addr = node->gtGetOp1();
GenTree* data = node->gtGetOp2();
emitAttr size = emitTypeSize(node->TypeGet());
assert(addr->isUsedFromReg());
assert(data->isUsedFromReg());
assert((size == EA_4BYTE) || (size == EA_PTRSIZE));
genConsumeOperands(node);
// If the destination register is different from the data register then we need
// to first move the data to the target register. Make sure we don't overwrite
// the address, the register allocator should have taken care of this.
assert((node->GetRegNum() != addr->GetRegNum()) || (node->GetRegNum() == data->GetRegNum()));
GetEmitter()->emitIns_Mov(INS_mov, size, node->GetRegNum(), data->GetRegNum(), /* canSkip */ true);
instruction ins = node->OperIs(GT_XADD) ? INS_xadd : INS_xchg;
// XCHG has an implied lock prefix when the first operand is a memory operand.
if (ins != INS_xchg)
{
instGen(INS_lock);
}
GetEmitter()->emitIns_AR_R(ins, size, node->GetRegNum(), addr->GetRegNum(), 0);
genProduceReg(node);
}
//------------------------------------------------------------------------
// genCodeForCmpXchg: Produce code for a GT_CMPXCHG node.
//
// Arguments:
// tree - the GT_CMPXCHG node
//
void CodeGen::genCodeForCmpXchg(GenTreeCmpXchg* tree)
{
assert(tree->OperIs(GT_CMPXCHG));
var_types targetType = tree->TypeGet();
regNumber targetReg = tree->GetRegNum();
GenTree* location = tree->gtOpLocation; // arg1
GenTree* value = tree->gtOpValue; // arg2
GenTree* comparand = tree->gtOpComparand; // arg3
assert(location->GetRegNum() != REG_NA && location->GetRegNum() != REG_RAX);
assert(value->GetRegNum() != REG_NA && value->GetRegNum() != REG_RAX);
genConsumeReg(location);
genConsumeReg(value);
genConsumeReg(comparand);
// comparand goes to RAX;
// Note that we must issue this move after the genConsumeRegs(), in case any of the above
// have a GT_COPY from RAX.
inst_Mov(comparand->TypeGet(), REG_RAX, comparand->GetRegNum(), /* canSkip */ true);
// location is Rm
instGen(INS_lock);
GetEmitter()->emitIns_AR_R(INS_cmpxchg, emitTypeSize(targetType), value->GetRegNum(), location->GetRegNum(), 0);
// Result is in RAX
inst_Mov(targetType, targetReg, REG_RAX, /* canSkip */ true);
genProduceReg(tree);
}
// generate code for BoundsCheck nodes
void CodeGen::genRangeCheck(GenTree* oper)
{
noway_assert(oper->OperIs(GT_BOUNDS_CHECK));
GenTreeBoundsChk* bndsChk = oper->AsBoundsChk();
GenTree* arrIndex = bndsChk->GetIndex();
GenTree* arrLen = bndsChk->GetArrayLength();
GenTree * src1, *src2;
emitJumpKind jmpKind;
instruction cmpKind;
genConsumeRegs(arrIndex);
genConsumeRegs(arrLen);
if (arrIndex->IsIntegralConst(0) && arrLen->isUsedFromReg())
{
// arrIndex is 0 and arrLen is in a reg. In this case
// we can generate
// test reg, reg
// since arrLen is non-negative
src1 = arrLen;
src2 = arrLen;
jmpKind = EJ_je;
cmpKind = INS_test;
}
else if (arrIndex->isContainedIntOrIImmed())
{
// arrIndex is a contained constant. In this case
// we will generate one of the following
// cmp [mem], immed (if arrLen is a memory op)
// cmp reg, immed (if arrLen is in a reg)
//
        // That is, arrLen cannot be a contained immed.
assert(!arrLen->isContainedIntOrIImmed());
src1 = arrLen;
src2 = arrIndex;
jmpKind = EJ_jbe;
cmpKind = INS_cmp;
}
else
{
// arrIndex could either be a contained memory op or a reg
// In this case we will generate one of the following
// cmp [mem], immed (if arrLen is a constant)
// cmp [mem], reg (if arrLen is in a reg)
// cmp reg, immed (if arrIndex is in a reg)
// cmp reg1, reg2 (if arrIndex is in reg1)
// cmp reg, [mem] (if arrLen is a memory op)
//
        // That is, only one of arrIndex or arrLen can be a memory op.
assert(!arrIndex->isUsedFromMemory() || !arrLen->isUsedFromMemory());
src1 = arrIndex;
src2 = arrLen;
jmpKind = EJ_jae;
cmpKind = INS_cmp;
}
var_types bndsChkType = src2->TypeGet();
#if DEBUG
// Bounds checks can only be 32 or 64 bit sized comparisons.
assert(bndsChkType == TYP_INT || bndsChkType == TYP_LONG);
    // The type of the bounds check should always be wide enough to compare against the index.
assert(emitTypeSize(bndsChkType) >= emitTypeSize(src1->TypeGet()));
#endif // DEBUG
GetEmitter()->emitInsBinary(cmpKind, emitTypeSize(bndsChkType), src1, src2);
genJumpToThrowHlpBlk(jmpKind, bndsChk->gtThrowKind, bndsChk->gtIndRngFailBB);
}
//---------------------------------------------------------------------
// genCodeForPhysReg - generate code for a GT_PHYSREG node
//
// Arguments
// tree - the GT_PHYSREG node
//
// Return value:
// None
//
void CodeGen::genCodeForPhysReg(GenTreePhysReg* tree)
{
assert(tree->OperIs(GT_PHYSREG));
var_types targetType = tree->TypeGet();
regNumber targetReg = tree->GetRegNum();
inst_Mov(targetType, targetReg, tree->gtSrcReg, /* canSkip */ true);
genTransferRegGCState(targetReg, tree->gtSrcReg);
genProduceReg(tree);
}
//---------------------------------------------------------------------
// genCodeForNullCheck - generate code for a GT_NULLCHECK node
//
// Arguments
// tree - the GT_NULLCHECK node
//
// Return value:
// None
//
void CodeGen::genCodeForNullCheck(GenTreeIndir* tree)
{
assert(tree->OperIs(GT_NULLCHECK));
assert(tree->gtOp1->isUsedFromReg());
regNumber reg = genConsumeReg(tree->gtOp1);
GetEmitter()->emitIns_AR_R(INS_cmp, emitTypeSize(tree), reg, reg, 0);
}
//------------------------------------------------------------------------
// genCodeForArrIndex: Generates code to bounds check the index for one dimension of an array reference,
// producing the effective index by subtracting the lower bound.
//
// Arguments:
// arrIndex - the node for which we're generating code
//
// Return Value:
// None.
//
void CodeGen::genCodeForArrIndex(GenTreeArrIndex* arrIndex)
{
GenTree* arrObj = arrIndex->ArrObj();
GenTree* indexNode = arrIndex->IndexExpr();
regNumber arrReg = genConsumeReg(arrObj);
regNumber indexReg = genConsumeReg(indexNode);
regNumber tgtReg = arrIndex->GetRegNum();
unsigned dim = arrIndex->gtCurrDim;
unsigned rank = arrIndex->gtArrRank;
var_types elemType = arrIndex->gtArrElemType;
noway_assert(tgtReg != REG_NA);
// Subtract the lower bound for this dimension.
// TODO-XArch-CQ: make this contained if it's an immediate that fits.
inst_Mov(indexNode->TypeGet(), tgtReg, indexReg, /* canSkip */ true);
GetEmitter()->emitIns_R_AR(INS_sub, emitActualTypeSize(TYP_INT), tgtReg, arrReg,
compiler->eeGetMDArrayLowerBoundOffset(rank, dim));
GetEmitter()->emitIns_R_AR(INS_cmp, emitActualTypeSize(TYP_INT), tgtReg, arrReg,
compiler->eeGetMDArrayLengthOffset(rank, dim));
genJumpToThrowHlpBlk(EJ_jae, SCK_RNGCHK_FAIL);
genProduceReg(arrIndex);
}
//------------------------------------------------------------------------
// genCodeForArrOffset: Generates code to compute the flattened array offset for
// one dimension of an array reference:
// result = (prevDimOffset * dimSize) + effectiveIndex
// where dimSize is obtained from the arrObj operand
//
// Arguments:
// arrOffset - the node for which we're generating code
//
// Return Value:
// None.
//
// Notes:
// dimSize and effectiveIndex are always non-negative, the former by design,
// and the latter because it has been normalized to be zero-based.
void CodeGen::genCodeForArrOffset(GenTreeArrOffs* arrOffset)
{
GenTree* offsetNode = arrOffset->gtOffset;
GenTree* indexNode = arrOffset->gtIndex;
GenTree* arrObj = arrOffset->gtArrObj;
regNumber tgtReg = arrOffset->GetRegNum();
assert(tgtReg != REG_NA);
unsigned dim = arrOffset->gtCurrDim;
unsigned rank = arrOffset->gtArrRank;
var_types elemType = arrOffset->gtArrElemType;
// First, consume the operands in the correct order.
regNumber offsetReg = REG_NA;
regNumber tmpReg = REG_NA;
if (!offsetNode->IsIntegralConst(0))
{
offsetReg = genConsumeReg(offsetNode);
// We will use a temp register for the offset*scale+effectiveIndex computation.
tmpReg = arrOffset->GetSingleTempReg();
}
else
{
assert(offsetNode->isContained());
}
regNumber indexReg = genConsumeReg(indexNode);
// Although arrReg may not be used in the constant-index case, if we have generated
// the value into a register, we must consume it, otherwise we will fail to end the
// live range of the gc ptr.
// TODO-CQ: Currently arrObj will always have a register allocated to it.
// We could avoid allocating a register for it, which would be of value if the arrObj
// is an on-stack lclVar.
regNumber arrReg = REG_NA;
if (arrObj->gtHasReg(compiler))
{
arrReg = genConsumeReg(arrObj);
}
if (!offsetNode->IsIntegralConst(0))
{
assert(tmpReg != REG_NA);
assert(arrReg != REG_NA);
// Evaluate tgtReg = offsetReg*dim_size + indexReg.
// tmpReg is used to load dim_size and the result of the multiplication.
// Note that dim_size will never be negative.
GetEmitter()->emitIns_R_AR(INS_mov, emitActualTypeSize(TYP_INT), tmpReg, arrReg,
compiler->eeGetMDArrayLengthOffset(rank, dim));
inst_RV_RV(INS_imul, tmpReg, offsetReg);
if (tmpReg == tgtReg)
{
inst_RV_RV(INS_add, tmpReg, indexReg);
}
else
{
inst_Mov(TYP_I_IMPL, tgtReg, indexReg, /* canSkip */ true);
inst_RV_RV(INS_add, tgtReg, tmpReg);
}
}
else
{
inst_Mov(TYP_INT, tgtReg, indexReg, /* canSkip */ true);
}
genProduceReg(arrOffset);
}
instruction CodeGen::genGetInsForOper(genTreeOps oper, var_types type)
{
instruction ins;
// Operations on SIMD vectors shouldn't come this path
assert(!varTypeIsSIMD(type));
if (varTypeIsFloating(type))
{
return ins_MathOp(oper, type);
}
switch (oper)
{
case GT_ADD:
ins = INS_add;
break;
case GT_AND:
ins = INS_and;
break;
case GT_LSH:
ins = INS_shl;
break;
case GT_MUL:
ins = INS_imul;
break;
case GT_NEG:
ins = INS_neg;
break;
case GT_NOT:
ins = INS_not;
break;
case GT_OR:
ins = INS_or;
break;
case GT_ROL:
ins = INS_rol;
break;
case GT_ROR:
ins = INS_ror;
break;
case GT_RSH:
ins = INS_sar;
break;
case GT_RSZ:
ins = INS_shr;
break;
case GT_SUB:
ins = INS_sub;
break;
case GT_XOR:
ins = INS_xor;
break;
#if !defined(TARGET_64BIT)
case GT_ADD_LO:
ins = INS_add;
break;
case GT_ADD_HI:
ins = INS_adc;
break;
case GT_SUB_LO:
ins = INS_sub;
break;
case GT_SUB_HI:
ins = INS_sbb;
break;
case GT_LSH_HI:
ins = INS_shld;
break;
case GT_RSH_LO:
ins = INS_shrd;
break;
#endif // !defined(TARGET_64BIT)
default:
unreached();
break;
}
return ins;
}
//------------------------------------------------------------------------
// genCodeForShift: Generates the code sequence for a GenTree node that
// represents a bit shift or rotate operation (<<, >>, >>>, rol, ror).
//
// Arguments:
// tree - the bit shift node (that specifies the type of bit shift to perform).
//
// Assumptions:
// a) All GenTrees are register allocated.
// b) The shift-by-amount in tree->AsOp()->gtOp2 is either a contained constant or
// it's a register-allocated expression. If it is in a register that is
// not RCX, it will be moved to RCX (so RCX better not be in use!).
//
void CodeGen::genCodeForShift(GenTree* tree)
{
// Only the non-RMW case here.
assert(tree->OperIsShiftOrRotate());
assert(tree->AsOp()->gtOp1->isUsedFromReg());
assert(tree->GetRegNum() != REG_NA);
genConsumeOperands(tree->AsOp());
var_types targetType = tree->TypeGet();
instruction ins = genGetInsForOper(tree->OperGet(), targetType);
GenTree* operand = tree->gtGetOp1();
regNumber operandReg = operand->GetRegNum();
GenTree* shiftBy = tree->gtGetOp2();
if (shiftBy->isContainedIntOrIImmed())
{
emitAttr size = emitTypeSize(tree);
// Optimize "X<<1" to "lea [reg+reg]" or "add reg, reg"
if (tree->OperIs(GT_LSH) && !tree->gtOverflowEx() && !tree->gtSetFlags() && shiftBy->IsIntegralConst(1))
{
if (tree->GetRegNum() == operandReg)
{
GetEmitter()->emitIns_R_R(INS_add, size, tree->GetRegNum(), operandReg);
}
else
{
GetEmitter()->emitIns_R_ARX(INS_lea, size, tree->GetRegNum(), operandReg, operandReg, 1, 0);
}
}
else
{
int shiftByValue = (int)shiftBy->AsIntConCommon()->IconValue();
#if defined(TARGET_64BIT)
// Try to emit rorx if BMI2 is available instead of mov+rol
// it makes sense only for 64bit integers
if ((genActualType(targetType) == TYP_LONG) && (tree->GetRegNum() != operandReg) &&
compiler->compOpportunisticallyDependsOn(InstructionSet_BMI2) && tree->OperIs(GT_ROL, GT_ROR) &&
(shiftByValue > 0) && (shiftByValue < 64))
{
const int value = tree->OperIs(GT_ROL) ? (64 - shiftByValue) : shiftByValue;
GetEmitter()->emitIns_R_R_I(INS_rorx, size, tree->GetRegNum(), operandReg, value);
genProduceReg(tree);
return;
}
#endif
// First, move the operand to the destination register and
// later on perform the shift in-place.
// (LSRA will try to avoid this situation through preferencing.)
inst_Mov(targetType, tree->GetRegNum(), operandReg, /* canSkip */ true);
inst_RV_SH(ins, size, tree->GetRegNum(), shiftByValue);
}
}
else
{
// We must have the number of bits to shift stored in ECX, since we constrained this node to
// sit in ECX. In case this didn't happen, LSRA expects the code generator to move it since it's a single
// register destination requirement.
genCopyRegIfNeeded(shiftBy, REG_RCX);
// The operand to be shifted must not be in ECX
noway_assert(operandReg != REG_RCX);
inst_Mov(targetType, tree->GetRegNum(), operandReg, /* canSkip */ true);
inst_RV(ins, tree->GetRegNum(), targetType);
}
genProduceReg(tree);
}
#ifdef TARGET_X86
//------------------------------------------------------------------------
// genCodeForShiftLong: Generates the code sequence for a GenTree node that
// represents a three operand bit shift or rotate operation (<<Hi, >>Lo).
//
// Arguments:
// tree - the bit shift node (that specifies the type of bit shift to perform).
//
// Assumptions:
// a) All GenTrees are register allocated.
// b) The shift-by-amount in tree->AsOp()->gtOp2 is a contained constant
//
// TODO-X86-CQ: This only handles the case where the operand being shifted is in a register. We don't
// need sourceHi to be always in reg in case of GT_LSH_HI (because it could be moved from memory to
// targetReg if sourceHi is a memory operand). Similarly for GT_RSH_LO, sourceLo could be marked as
// contained memory-op. Even if not a memory-op, we could mark it as reg-optional.
//
void CodeGen::genCodeForShiftLong(GenTree* tree)
{
// Only the non-RMW case here.
genTreeOps oper = tree->OperGet();
assert(oper == GT_LSH_HI || oper == GT_RSH_LO);
GenTree* operand = tree->AsOp()->gtOp1;
assert(operand->OperGet() == GT_LONG);
assert(operand->AsOp()->gtOp1->isUsedFromReg());
assert(operand->AsOp()->gtOp2->isUsedFromReg());
GenTree* operandLo = operand->gtGetOp1();
GenTree* operandHi = operand->gtGetOp2();
regNumber regLo = operandLo->GetRegNum();
regNumber regHi = operandHi->GetRegNum();
genConsumeOperands(tree->AsOp());
var_types targetType = tree->TypeGet();
instruction ins = genGetInsForOper(oper, targetType);
GenTree* shiftBy = tree->gtGetOp2();
assert(shiftBy->isContainedIntOrIImmed());
unsigned int count = (unsigned int)shiftBy->AsIntConCommon()->IconValue();
regNumber regResult = (oper == GT_LSH_HI) ? regHi : regLo;
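// Sketch of the usual sequence (hypothetical registers): genGetInsForOper typically selects
// shld for GT_LSH_HI and shrd for GT_RSH_LO, so a left shift of the high part becomes roughly
//      mov     targetReg, regHi
//      shld    targetReg, regLo, count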
inst_Mov(targetType, tree->GetRegNum(), regResult, /* canSkip */ true);
if (oper == GT_LSH_HI)
{
inst_RV_RV_IV(ins, emitTypeSize(targetType), tree->GetRegNum(), regLo, count);
}
else
{
assert(oper == GT_RSH_LO);
inst_RV_RV_IV(ins, emitTypeSize(targetType), tree->GetRegNum(), regHi, count);
}
genProduceReg(tree);
}
#endif
//------------------------------------------------------------------------
// genMapShiftInsToShiftByConstantIns: Given a general shift/rotate instruction,
// map it to the specific x86/x64 shift opcode for a shift/rotate by a constant.
// X86/x64 has a special encoding for shift/rotate-by-constant-1.
//
// Arguments:
// ins: the base shift/rotate instruction
// shiftByValue: the constant value by which we are shifting/rotating
//
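// For example (per the mapping below): genMapShiftInsToShiftByConstantIns(INS_shl, 1) yields
// INS_shl_1, while genMapShiftInsToShiftByConstantIns(INS_shl, 5) yields INS_shl_N; this relies
// on the instruction enum layout asserted in the body.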
instruction CodeGen::genMapShiftInsToShiftByConstantIns(instruction ins, int shiftByValue)
{
assert(ins == INS_rcl || ins == INS_rcr || ins == INS_rol || ins == INS_ror || ins == INS_shl || ins == INS_shr ||
ins == INS_sar);
// Which format should we use?
instruction shiftByConstantIns;
if (shiftByValue == 1)
{
// Use the shift-by-one format.
assert(INS_rcl + 1 == INS_rcl_1);
assert(INS_rcr + 1 == INS_rcr_1);
assert(INS_rol + 1 == INS_rol_1);
assert(INS_ror + 1 == INS_ror_1);
assert(INS_shl + 1 == INS_shl_1);
assert(INS_shr + 1 == INS_shr_1);
assert(INS_sar + 1 == INS_sar_1);
shiftByConstantIns = (instruction)(ins + 1);
}
else
{
// Use the shift-by-NNN format.
assert(INS_rcl + 2 == INS_rcl_N);
assert(INS_rcr + 2 == INS_rcr_N);
assert(INS_rol + 2 == INS_rol_N);
assert(INS_ror + 2 == INS_ror_N);
assert(INS_shl + 2 == INS_shl_N);
assert(INS_shr + 2 == INS_shr_N);
assert(INS_sar + 2 == INS_sar_N);
shiftByConstantIns = (instruction)(ins + 2);
}
return shiftByConstantIns;
}
//------------------------------------------------------------------------
// genCodeForShiftRMW: Generates the code sequence for a GT_STOREIND GenTree node that
// represents a RMW bit shift or rotate operation (<<, >>, >>>, rol, ror), for example:
// GT_STOREIND( AddressTree, GT_SHL( Ind ( AddressTree ), Operand ) )
//
// Arguments:
// storeIndNode: the GT_STOREIND node.
//
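// Roughly speaking (hypothetical operands): a left shift by a contained constant 3 becomes
//      shl     dword ptr [addr], 3
// while a variable shift amount is first moved into ECX and becomes
//      shl     dword ptr [addr], cl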
void CodeGen::genCodeForShiftRMW(GenTreeStoreInd* storeInd)
{
GenTree* data = storeInd->Data();
assert(data->OperIsShift() || data->OperIsRotate());
// This function only handles the RMW case.
assert(data->AsOp()->gtOp1->isUsedFromMemory());
assert(data->AsOp()->gtOp1->isIndir());
assert(Lowering::IndirsAreEquivalent(data->AsOp()->gtOp1, storeInd));
assert(data->GetRegNum() == REG_NA);
var_types targetType = data->TypeGet();
genTreeOps oper = data->OperGet();
instruction ins = genGetInsForOper(oper, targetType);
emitAttr attr = EA_ATTR(genTypeSize(targetType));
GenTree* shiftBy = data->AsOp()->gtOp2;
if (shiftBy->isContainedIntOrIImmed())
{
int shiftByValue = (int)shiftBy->AsIntConCommon()->IconValue();
ins = genMapShiftInsToShiftByConstantIns(ins, shiftByValue);
if (shiftByValue == 1)
{
// There is no source in this case, as the shift by count is embedded in the instruction opcode itself.
GetEmitter()->emitInsRMW(ins, attr, storeInd);
}
else
{
GetEmitter()->emitInsRMW(ins, attr, storeInd, shiftBy);
}
}
else
{
// We must have the number of bits to shift stored in ECX, since we constrained this node to
// sit in ECX. In case this didn't happen, LSRA expects the code generator to move it since it's a single
// register destination requirement.
genCopyRegIfNeeded(shiftBy, REG_RCX);
// The shiftBy operand is implicit, so call the unary version of emitInsRMW.
GetEmitter()->emitInsRMW(ins, attr, storeInd);
}
}
//------------------------------------------------------------------------
// genCodeForLclAddr: Generates the code for GT_LCL_FLD_ADDR/GT_LCL_VAR_ADDR.
//
// Arguments:
// tree - the node.
//
void CodeGen::genCodeForLclAddr(GenTree* tree)
{
assert(tree->OperIs(GT_LCL_FLD_ADDR, GT_LCL_VAR_ADDR));
var_types targetType = tree->TypeGet();
regNumber targetReg = tree->GetRegNum();
// Address of a local var.
noway_assert((targetType == TYP_BYREF) || (targetType == TYP_I_IMPL));
emitAttr size = emitTypeSize(targetType);
inst_RV_TT(INS_lea, targetReg, tree, 0, size);
genProduceReg(tree);
}
//------------------------------------------------------------------------
// genCodeForLclFld: Produce code for a GT_LCL_FLD node.
//
// Arguments:
// tree - the GT_LCL_FLD node
//
void CodeGen::genCodeForLclFld(GenTreeLclFld* tree)
{
assert(tree->OperIs(GT_LCL_FLD));
var_types targetType = tree->TypeGet();
regNumber targetReg = tree->GetRegNum();
noway_assert(targetReg != REG_NA);
#ifdef FEATURE_SIMD
// Loading of TYP_SIMD12 (i.e. Vector3) field
if (targetType == TYP_SIMD12)
{
genLoadLclTypeSIMD12(tree);
return;
}
#endif
noway_assert(targetType != TYP_STRUCT);
emitAttr size = emitTypeSize(targetType);
unsigned offs = tree->GetLclOffs();
unsigned varNum = tree->GetLclNum();
assert(varNum < compiler->lvaCount);
GetEmitter()->emitIns_R_S(ins_Load(targetType), size, targetReg, varNum, offs);
genProduceReg(tree);
}
//------------------------------------------------------------------------
// genCodeForLclVar: Produce code for a GT_LCL_VAR node.
//
// Arguments:
// tree - the GT_LCL_VAR node
//
void CodeGen::genCodeForLclVar(GenTreeLclVar* tree)
{
assert(tree->OperIs(GT_LCL_VAR));
// lcl_vars are not defs
assert((tree->gtFlags & GTF_VAR_DEF) == 0);
LclVarDsc* varDsc = compiler->lvaGetDesc(tree);
bool isRegCandidate = varDsc->lvIsRegCandidate();
// If this is a register candidate that has been spilled, genConsumeReg() will
// reload it at the point of use. Otherwise, if it's not in a register, we load it here.
if (!isRegCandidate && !tree->IsMultiReg() && !(tree->gtFlags & GTF_SPILLED))
{
#if defined(FEATURE_SIMD) && defined(TARGET_X86)
// Loading of TYP_SIMD12 (i.e. Vector3) variable
if (tree->TypeGet() == TYP_SIMD12)
{
genLoadLclTypeSIMD12(tree);
return;
}
#endif // defined(FEATURE_SIMD) && defined(TARGET_X86)
var_types type = varDsc->GetRegisterType(tree);
GetEmitter()->emitIns_R_S(ins_Load(type, compiler->isSIMDTypeLocalAligned(tree->GetLclNum())),
emitTypeSize(type), tree->GetRegNum(), tree->GetLclNum(), 0);
genProduceReg(tree);
}
}
//------------------------------------------------------------------------
// genCodeForStoreLclFld: Produce code for a GT_STORE_LCL_FLD node.
//
// Arguments:
// tree - the GT_STORE_LCL_FLD node
//
void CodeGen::genCodeForStoreLclFld(GenTreeLclFld* tree)
{
assert(tree->OperIs(GT_STORE_LCL_FLD));
var_types targetType = tree->TypeGet();
GenTree* op1 = tree->gtGetOp1();
noway_assert(targetType != TYP_STRUCT);
#ifdef FEATURE_SIMD
// storing of TYP_SIMD12 (i.e. Vector3) field
if (targetType == TYP_SIMD12)
{
genStoreLclTypeSIMD12(tree);
return;
}
#endif // FEATURE_SIMD
assert(varTypeUsesFloatReg(targetType) == varTypeUsesFloatReg(op1));
assert(genTypeSize(genActualType(targetType)) == genTypeSize(genActualType(op1->TypeGet())));
genConsumeRegs(op1);
if (op1->OperIs(GT_BITCAST) && op1->isContained())
{
regNumber targetReg = tree->GetRegNum();
GenTree* bitCastSrc = op1->gtGetOp1();
var_types srcType = bitCastSrc->TypeGet();
noway_assert(!bitCastSrc->isContained());
if (targetReg == REG_NA)
{
unsigned lclNum = tree->GetLclNum();
LclVarDsc* varDsc = compiler->lvaGetDesc(lclNum);
GetEmitter()->emitIns_S_R(ins_Store(srcType, compiler->isSIMDTypeLocalAligned(lclNum)),
emitTypeSize(targetType), bitCastSrc->GetRegNum(), lclNum, tree->GetLclOffs());
varDsc->SetRegNum(REG_STK);
}
else
{
genBitCast(targetType, targetReg, srcType, bitCastSrc->GetRegNum());
}
}
else
{
GetEmitter()->emitInsBinary(ins_Store(targetType), emitTypeSize(tree), tree, op1);
}
// Updating variable liveness after instruction was emitted
genUpdateLife(tree);
}
//------------------------------------------------------------------------
// genCodeForStoreLclVar: Produce code for a GT_STORE_LCL_VAR node.
//
// Arguments:
// lclNode - the GT_STORE_LCL_VAR node
//
void CodeGen::genCodeForStoreLclVar(GenTreeLclVar* lclNode)
{
assert(lclNode->OperIs(GT_STORE_LCL_VAR));
regNumber targetReg = lclNode->GetRegNum();
emitter* emit = GetEmitter();
GenTree* op1 = lclNode->gtGetOp1();
// Stores from a multi-reg source are handled separately.
if (op1->gtSkipReloadOrCopy()->IsMultiRegNode())
{
genMultiRegStoreToLocal(lclNode);
}
else
{
unsigned lclNum = lclNode->GetLclNum();
LclVarDsc* varDsc = compiler->lvaGetDesc(lclNum);
var_types targetType = varDsc->GetRegisterType(lclNode);
#ifdef DEBUG
var_types op1Type = op1->TypeGet();
if (op1Type == TYP_STRUCT)
{
assert(op1->IsLocal());
GenTreeLclVar* op1LclVar = op1->AsLclVar();
unsigned op1lclNum = op1LclVar->GetLclNum();
LclVarDsc* op1VarDsc = compiler->lvaGetDesc(op1lclNum);
op1Type = op1VarDsc->GetRegisterType(op1LclVar);
}
assert(varTypeUsesFloatReg(targetType) == varTypeUsesFloatReg(op1Type));
assert(!varTypeUsesFloatReg(targetType) || (emitTypeSize(targetType) == emitTypeSize(op1Type)));
#endif
#if !defined(TARGET_64BIT)
if (targetType == TYP_LONG)
{
genStoreLongLclVar(lclNode);
return;
}
#endif // !defined(TARGET_64BIT)
#ifdef FEATURE_SIMD
// storing of TYP_SIMD12 (i.e. Vector3) field
if (targetType == TYP_SIMD12)
{
genStoreLclTypeSIMD12(lclNode);
return;
}
#endif // FEATURE_SIMD
genConsumeRegs(op1);
if (op1->OperIs(GT_BITCAST) && op1->isContained())
{
GenTree* bitCastSrc = op1->gtGetOp1();
var_types srcType = bitCastSrc->TypeGet();
noway_assert(!bitCastSrc->isContained());
if (targetReg == REG_NA)
{
emit->emitIns_S_R(ins_Store(srcType, compiler->isSIMDTypeLocalAligned(lclNum)),
emitTypeSize(targetType), bitCastSrc->GetRegNum(), lclNum, 0);
genUpdateLife(lclNode);
varDsc->SetRegNum(REG_STK);
}
else
{
genBitCast(targetType, targetReg, srcType, bitCastSrc->GetRegNum());
}
}
else if (targetReg == REG_NA)
{
// stack store
emit->emitInsStoreLcl(ins_Store(targetType, compiler->isSIMDTypeLocalAligned(lclNum)),
emitTypeSize(targetType), lclNode);
varDsc->SetRegNum(REG_STK);
}
else
{
// Look for the case where we have a constant zero which we've marked for reuse,
// but which isn't actually in the register we want. In that case, it's better to create
// zero in the target register, because an xor is smaller than a copy. Note that we could
// potentially handle this in the register allocator, but we can't always catch it there
// because the target may not have a register allocated for it yet.
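// In other words (a sketch, not the only possible shape): prefer materializing the zero with
//      xor     targetReg, targetReg
// via genSetRegToConst below, rather than copying it from the register holding the reused zero.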
if (op1->isUsedFromReg() && (op1->GetRegNum() != targetReg) && (op1->IsIntegralConst(0) || op1->IsFPZero()))
{
op1->SetRegNum(REG_NA);
op1->ResetReuseRegVal();
op1->SetContained();
}
if (!op1->isUsedFromReg())
{
// Currently, we assume that the non-reg source of a GT_STORE_LCL_VAR writing to a register
// must be a constant. However, in the future we might want to support an operand used from
// memory. This is a bit tricky because we have to decide it can be used from memory before
// register allocation,
// and this would be a case where, once that's done, we need to mark that node as always
// requiring a register - which we always assume now anyway, but once we "optimize" that
// we'll have to take cases like this into account.
assert((op1->GetRegNum() == REG_NA) && op1->OperIsConst());
genSetRegToConst(targetReg, targetType, op1);
}
else
{
assert(targetReg == lclNode->GetRegNum());
assert(op1->GetRegNum() != REG_NA);
inst_Mov_Extend(targetType, /* srcInReg */ true, targetReg, op1->GetRegNum(), /* canSkip */ true,
emitTypeSize(targetType));
}
}
if (targetReg != REG_NA)
{
genProduceReg(lclNode);
}
}
}
//------------------------------------------------------------------------
// genCodeForIndexAddr: Produce code for a GT_INDEX_ADDR node.
//
// Arguments:
// tree - the GT_INDEX_ADDR node
//
void CodeGen::genCodeForIndexAddr(GenTreeIndexAddr* node)
{
GenTree* const base = node->Arr();
GenTree* const index = node->Index();
const regNumber baseReg = genConsumeReg(base);
regNumber indexReg = genConsumeReg(index);
const regNumber dstReg = node->GetRegNum();
// NOTE: `genConsumeReg` marks the consumed register as not a GC pointer, as it assumes that the input registers
// die at the first instruction generated by the node. This is not the case for `INDEX_ADDR`, however, as the
// base register is multiply-used. As such, we need to mark the base register as containing a GC pointer until
// we are finished generating the code for this node.
gcInfo.gcMarkRegPtrVal(baseReg, base->TypeGet());
assert(varTypeIsIntegral(index->TypeGet()));
regNumber tmpReg = REG_NA;
#ifdef TARGET_64BIT
tmpReg = node->GetSingleTempReg();
#endif
// Generate the bounds check if necessary.
if ((node->gtFlags & GTF_INX_RNGCHK) != 0)
{
#ifdef TARGET_64BIT
// The CLI Spec allows an array to be indexed by either an int32 or a native int. In the case that the index
// is a native int on a 64-bit platform, we will need to widen the array length and then compare.
if (index->TypeGet() == TYP_I_IMPL)
{
GetEmitter()->emitIns_R_AR(INS_mov, EA_4BYTE, tmpReg, baseReg, static_cast<int>(node->gtLenOffset));
GetEmitter()->emitIns_R_R(INS_cmp, EA_8BYTE, indexReg, tmpReg);
}
else
#endif // TARGET_64BIT
{
GetEmitter()->emitIns_R_AR(INS_cmp, EA_4BYTE, indexReg, baseReg, static_cast<int>(node->gtLenOffset));
}
genJumpToThrowHlpBlk(EJ_jae, SCK_RNGCHK_FAIL, node->gtIndRngFailBB);
}
#ifdef TARGET_64BIT
if (index->TypeGet() != TYP_I_IMPL)
{
// LEA needs 64-bit operands so we need to widen the index if it's TYP_INT.
GetEmitter()->emitIns_Mov(INS_mov, EA_4BYTE, tmpReg, indexReg, /* canSkip */ false);
indexReg = tmpReg;
}
#endif // TARGET_64BIT
// Compute the address of the array element.
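// The address is ultimately formed by a single lea, roughly (hypothetical registers):
//      lea     dstReg, [baseReg + tmpReg*scale + elemOffset]
// where tmpReg is the (possibly widened or pre-multiplied) index computed below.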
unsigned scale = node->gtElemSize;
switch (scale)
{
case 1:
case 2:
case 4:
case 8:
tmpReg = indexReg;
break;
default:
#ifdef TARGET_64BIT
// IMUL treats its immediate operand as signed so scale can't be larger than INT32_MAX.
// The VM doesn't allow such large array elements but let's be sure.
noway_assert(scale <= INT32_MAX);
#else // !TARGET_64BIT
tmpReg = node->GetSingleTempReg();
#endif // !TARGET_64BIT
GetEmitter()->emitIns_R_I(emitter::inst3opImulForReg(tmpReg), EA_PTRSIZE, indexReg,
static_cast<ssize_t>(scale));
scale = 1;
break;
}
GetEmitter()->emitIns_R_ARX(INS_lea, emitTypeSize(node->TypeGet()), dstReg, baseReg, tmpReg, scale,
static_cast<int>(node->gtElemOffset));
gcInfo.gcMarkRegSetNpt(base->gtGetRegMask());
genProduceReg(node);
}
//------------------------------------------------------------------------
// genCodeForIndir: Produce code for a GT_IND node.
//
// Arguments:
// tree - the GT_IND node
//
void CodeGen::genCodeForIndir(GenTreeIndir* tree)
{
assert(tree->OperIs(GT_IND));
#ifdef FEATURE_SIMD
// Handling of Vector3 type values loaded through indirection.
if (tree->TypeGet() == TYP_SIMD12)
{
genLoadIndTypeSIMD12(tree);
return;
}
#endif // FEATURE_SIMD
var_types targetType = tree->TypeGet();
emitter* emit = GetEmitter();
GenTree* addr = tree->Addr();
if (addr->IsCnsIntOrI() && addr->IsIconHandle(GTF_ICON_TLS_HDL))
{
noway_assert(EA_ATTR(genTypeSize(targetType)) == EA_PTRSIZE);
emit->emitIns_R_C(ins_Load(TYP_I_IMPL), EA_PTRSIZE, tree->GetRegNum(), FLD_GLOBAL_FS,
(int)addr->AsIntCon()->gtIconVal);
}
else
{
genConsumeAddress(addr);
emit->emitInsLoadInd(ins_Load(targetType), emitTypeSize(tree), tree->GetRegNum(), tree);
}
genProduceReg(tree);
}
//------------------------------------------------------------------------
// genCodeForStoreInd: Produce code for a GT_STOREIND node.
//
// Arguments:
// tree - the GT_STOREIND node
//
void CodeGen::genCodeForStoreInd(GenTreeStoreInd* tree)
{
assert(tree->OperIs(GT_STOREIND));
#ifdef FEATURE_SIMD
// Storing Vector3 of size 12 bytes through indirection
if (tree->TypeGet() == TYP_SIMD12)
{
genStoreIndTypeSIMD12(tree);
return;
}
#endif // FEATURE_SIMD
GenTree* data = tree->Data();
GenTree* addr = tree->Addr();
var_types targetType = tree->TypeGet();
assert(!varTypeIsFloating(targetType) || (genTypeSize(targetType) == genTypeSize(data->TypeGet())));
GCInfo::WriteBarrierForm writeBarrierForm = gcInfo.gcIsWriteBarrierCandidate(tree, data);
if (writeBarrierForm != GCInfo::WBF_NoBarrier)
{
// data and addr must be in registers.
// Consume both registers so that any copies of interfering registers are taken care of.
genConsumeOperands(tree);
if (genEmitOptimizedGCWriteBarrier(writeBarrierForm, addr, data))
{
return;
}
// At this point, we should not have any interference.
// That is, 'data' must not be in REG_ARG_0, as that is where 'addr' must go.
noway_assert(data->GetRegNum() != REG_ARG_0);
// addr goes in REG_ARG_0
genCopyRegIfNeeded(addr, REG_ARG_0);
// data goes in REG_ARG_1
genCopyRegIfNeeded(data, REG_ARG_1);
genGCWriteBarrier(tree, writeBarrierForm);
}
else
{
bool dataIsUnary = false;
bool isRMWMemoryOp = tree->IsRMWMemoryOp();
GenTree* rmwSrc = nullptr;
// We must consume the operands in the proper execution order, so that liveness is
// updated appropriately.
genConsumeAddress(addr);
// If tree represents a RMW memory op then its data is a non-leaf node marked as contained,
// and the non-indir operand of data is the source of the RMW memory op.
if (isRMWMemoryOp)
{
assert(data->isContained() && !data->OperIsLeaf());
GenTree* rmwDst = nullptr;
dataIsUnary = (GenTree::OperIsUnary(data->OperGet()) != 0);
if (!dataIsUnary)
{
if (tree->IsRMWDstOp1())
{
rmwDst = data->gtGetOp1();
rmwSrc = data->gtGetOp2();
}
else
{
assert(tree->IsRMWDstOp2());
rmwDst = data->gtGetOp2();
rmwSrc = data->gtGetOp1();
}
genConsumeRegs(rmwSrc);
}
else
{
// *(p) = oper *(p): Here addr = p, rmwSrc = rmwDst = *(p) i.e. GT_IND(p)
// For unary RMW ops, src and dst of the RMW memory op are the same. Lowering
// clears operand counts on rmwSrc and we don't need to perform a
// genConsumeReg() on it.
assert(tree->IsRMWDstOp1());
rmwSrc = data->gtGetOp1();
rmwDst = data->gtGetOp1();
assert(rmwSrc->isUsedFromMemory());
}
assert(rmwSrc != nullptr);
assert(rmwDst != nullptr);
assert(Lowering::IndirsAreEquivalent(rmwDst, tree));
}
else
{
genConsumeRegs(data);
}
if (isRMWMemoryOp)
{
if (dataIsUnary)
{
// generate code for unary RMW memory ops like neg/not
GetEmitter()->emitInsRMW(genGetInsForOper(data->OperGet(), data->TypeGet()), emitTypeSize(tree), tree);
}
else
{
if (data->OperIsShiftOrRotate())
{
// Generate code for shift RMW memory ops.
// The data address needs to be op1 (it must be [addr] = [addr] <shift> <amount>, not [addr] =
// <amount> <shift> [addr]).
assert(tree->IsRMWDstOp1());
assert(rmwSrc == data->gtGetOp2());
genCodeForShiftRMW(tree);
}
else if (data->OperGet() == GT_ADD && (rmwSrc->IsIntegralConst(1) || rmwSrc->IsIntegralConst(-1)))
{
// Generate "inc/dec [mem]" instead of "add/sub [mem], 1".
//
// Notes:
// 1) Global morph transforms GT_SUB(x, +/-1) into GT_ADD(x, -/+1).
// 2) TODO-AMD64: Debugger routine NativeWalker::Decode() runs into
// an assert while decoding ModR/M byte of "inc dword ptr [rax]".
// It is not clear whether Decode() can handle all possible
// addr modes with inc/dec. For this reason, inc/dec [mem]
// is not generated while generating debuggable code. Update
// the above if condition once Decode() routine is fixed.
assert(rmwSrc->isContainedIntOrIImmed());
instruction ins = rmwSrc->IsIntegralConst(1) ? INS_inc : INS_dec;
GetEmitter()->emitInsRMW(ins, emitTypeSize(tree), tree);
}
else
{
// generate code for remaining binary RMW memory ops like add/sub/and/or/xor
GetEmitter()->emitInsRMW(genGetInsForOper(data->OperGet(), data->TypeGet()), emitTypeSize(tree),
tree, rmwSrc);
}
}
}
else
{
GetEmitter()->emitInsStoreInd(ins_Store(data->TypeGet()), emitTypeSize(tree), tree);
}
}
}
//------------------------------------------------------------------------
// genCodeForSwap: Produce code for a GT_SWAP node.
//
// Arguments:
// tree - the GT_SWAP node
//
void CodeGen::genCodeForSwap(GenTreeOp* tree)
{
assert(tree->OperIs(GT_SWAP));
// Swap is only supported for lclVar operands that are enregistered
// We do not consume or produce any registers. Both operands remain enregistered.
// However, the gc-ness may change.
assert(genIsRegCandidateLocal(tree->gtOp1) && genIsRegCandidateLocal(tree->gtOp2));
GenTreeLclVarCommon* lcl1 = tree->gtOp1->AsLclVarCommon();
LclVarDsc* varDsc1 = compiler->lvaGetDesc(lcl1);
var_types type1 = varDsc1->TypeGet();
GenTreeLclVarCommon* lcl2 = tree->gtOp2->AsLclVarCommon();
LclVarDsc* varDsc2 = compiler->lvaGetDesc(lcl2);
var_types type2 = varDsc2->TypeGet();
// We must have both int or both fp regs
assert(!varTypeUsesFloatReg(type1) || varTypeUsesFloatReg(type2));
// FP swap is not yet implemented (and should have NYI'd in LSRA)
assert(!varTypeUsesFloatReg(type1));
regNumber oldOp1Reg = lcl1->GetRegNum();
regMaskTP oldOp1RegMask = genRegMask(oldOp1Reg);
regNumber oldOp2Reg = lcl2->GetRegNum();
regMaskTP oldOp2RegMask = genRegMask(oldOp2Reg);
// We don't call genUpdateVarReg because we don't have a tree node with the new register.
varDsc1->SetRegNum(oldOp2Reg);
varDsc2->SetRegNum(oldOp1Reg);
// Do the xchg
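// This boils down to a single exchange, e.g. "xchg eax, edx" (hypothetical registers); the attr
// chosen below controls whether the emitter also swaps the registers' GC-ness.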
emitAttr size = EA_PTRSIZE;
if (varTypeGCtype(type1) != varTypeGCtype(type2))
{
// If the type specified to the emitter is a GC type, it will swap the GC-ness of the registers.
// Otherwise it will leave them alone, which is correct if they have the same GC-ness.
size = EA_GCREF;
}
inst_RV_RV(INS_xchg, oldOp1Reg, oldOp2Reg, TYP_I_IMPL, size);
// Update the gcInfo.
// Manually remove these regs for the gc sets (mostly to avoid confusing duplicative dump output)
gcInfo.gcRegByrefSetCur &= ~(oldOp1RegMask | oldOp2RegMask);
gcInfo.gcRegGCrefSetCur &= ~(oldOp1RegMask | oldOp2RegMask);
// gcMarkRegPtrVal will do the appropriate thing for non-gc types.
// It will also dump the updates.
gcInfo.gcMarkRegPtrVal(oldOp2Reg, type1);
gcInfo.gcMarkRegPtrVal(oldOp1Reg, type2);
}
//------------------------------------------------------------------------
// genEmitOptimizedGCWriteBarrier: Generate write barrier store using the optimized
// helper functions.
//
// Arguments:
// writeBarrierForm - the write barrier form to use
// addr - the address at which to do the store
// data - the data to store
//
// Return Value:
// true if an optimized write barrier form was used, false if not. If this
// function returns false, the caller must emit a "standard" write barrier.
bool CodeGen::genEmitOptimizedGCWriteBarrier(GCInfo::WriteBarrierForm writeBarrierForm, GenTree* addr, GenTree* data)
{
assert(writeBarrierForm != GCInfo::WBF_NoBarrier);
#if defined(TARGET_X86) && NOGC_WRITE_BARRIERS
if (!genUseOptimizedWriteBarriers(writeBarrierForm))
{
return false;
}
const static int regToHelper[2][8] = {
// If the target is known to be in managed memory
{
CORINFO_HELP_ASSIGN_REF_EAX, // EAX
CORINFO_HELP_ASSIGN_REF_ECX, // ECX
-1, // EDX (always the target address)
CORINFO_HELP_ASSIGN_REF_EBX, // EBX
-1, // ESP
CORINFO_HELP_ASSIGN_REF_EBP, // EBP
CORINFO_HELP_ASSIGN_REF_ESI, // ESI
CORINFO_HELP_ASSIGN_REF_EDI, // EDI
},
// Don't know if the target is in managed memory
{
CORINFO_HELP_CHECKED_ASSIGN_REF_EAX, // EAX
CORINFO_HELP_CHECKED_ASSIGN_REF_ECX, // ECX
-1, // EDX (always the target address)
CORINFO_HELP_CHECKED_ASSIGN_REF_EBX, // EBX
-1, // ESP
CORINFO_HELP_CHECKED_ASSIGN_REF_EBP, // EBP
CORINFO_HELP_CHECKED_ASSIGN_REF_ESI, // ESI
CORINFO_HELP_CHECKED_ASSIGN_REF_EDI, // EDI
},
};
noway_assert(regToHelper[0][REG_EAX] == CORINFO_HELP_ASSIGN_REF_EAX);
noway_assert(regToHelper[0][REG_ECX] == CORINFO_HELP_ASSIGN_REF_ECX);
noway_assert(regToHelper[0][REG_EBX] == CORINFO_HELP_ASSIGN_REF_EBX);
noway_assert(regToHelper[0][REG_ESP] == -1);
noway_assert(regToHelper[0][REG_EBP] == CORINFO_HELP_ASSIGN_REF_EBP);
noway_assert(regToHelper[0][REG_ESI] == CORINFO_HELP_ASSIGN_REF_ESI);
noway_assert(regToHelper[0][REG_EDI] == CORINFO_HELP_ASSIGN_REF_EDI);
noway_assert(regToHelper[1][REG_EAX] == CORINFO_HELP_CHECKED_ASSIGN_REF_EAX);
noway_assert(regToHelper[1][REG_ECX] == CORINFO_HELP_CHECKED_ASSIGN_REF_ECX);
noway_assert(regToHelper[1][REG_EBX] == CORINFO_HELP_CHECKED_ASSIGN_REF_EBX);
noway_assert(regToHelper[1][REG_ESP] == -1);
noway_assert(regToHelper[1][REG_EBP] == CORINFO_HELP_CHECKED_ASSIGN_REF_EBP);
noway_assert(regToHelper[1][REG_ESI] == CORINFO_HELP_CHECKED_ASSIGN_REF_ESI);
noway_assert(regToHelper[1][REG_EDI] == CORINFO_HELP_CHECKED_ASSIGN_REF_EDI);
regNumber reg = data->GetRegNum();
noway_assert((reg != REG_ESP) && (reg != REG_WRITE_BARRIER));
// Generate the following code:
// lea edx, addr
// call write_barrier_helper_reg
// addr goes in REG_ARG_0
genCopyRegIfNeeded(addr, REG_WRITE_BARRIER);
unsigned tgtAnywhere = 0;
if (writeBarrierForm != GCInfo::WBF_BarrierUnchecked)
{
tgtAnywhere = 1;
}
// We might want to call a modified version of genGCWriteBarrier() to get the benefit of
// the FEATURE_COUNT_GC_WRITE_BARRIERS code there, but that code doesn't look like it works
// with rationalized RyuJIT IR. So, for now, just emit the helper call directly here.
genEmitHelperCall(regToHelper[tgtAnywhere][reg],
0, // argSize
EA_PTRSIZE); // retSize
return true;
#else // !defined(TARGET_X86) || !NOGC_WRITE_BARRIERS
return false;
#endif // !defined(TARGET_X86) || !NOGC_WRITE_BARRIERS
}
// Produce code for a GT_CALL node
void CodeGen::genCall(GenTreeCall* call)
{
genAlignStackBeforeCall(call);
// all virtuals should have been expanded into a control expression
assert(!call->IsVirtual() || call->gtControlExpr || call->gtCallAddr);
// Insert a GS check if necessary
if (call->IsTailCallViaJitHelper())
{
if (compiler->getNeedsGSSecurityCookie())
{
#if FEATURE_FIXED_OUT_ARGS
// If either of the conditions below is true, we will need a temporary register in order to perform the GS
// cookie check. When FEATURE_FIXED_OUT_ARGS is disabled, we save and restore the temporary register using
// push/pop. When FEATURE_FIXED_OUT_ARGS is enabled, however, we need an alternative solution. For now,
// though, the tail prefix is ignored on all platforms that use fixed out args, so we should never hit this
// case.
assert(compiler->gsGlobalSecurityCookieAddr == nullptr);
assert((int)compiler->gsGlobalSecurityCookieVal == (ssize_t)compiler->gsGlobalSecurityCookieVal);
#endif
genEmitGSCookieCheck(true);
}
}
// Consume all the arg regs
for (GenTreeCall::Use& use : call->LateArgs())
{
GenTree* argNode = use.GetNode();
fgArgTabEntry* curArgTabEntry = compiler->gtArgEntryByNode(call, argNode->gtSkipReloadOrCopy());
assert(curArgTabEntry);
if (curArgTabEntry->GetRegNum() == REG_STK)
{
continue;
}
#ifdef UNIX_AMD64_ABI
// Deal with multi register passed struct args.
if (argNode->OperGet() == GT_FIELD_LIST)
{
unsigned regIndex = 0;
for (GenTreeFieldList::Use& use : argNode->AsFieldList()->Uses())
{
GenTree* putArgRegNode = use.GetNode();
assert(putArgRegNode->gtOper == GT_PUTARG_REG);
regNumber argReg = curArgTabEntry->GetRegNum(regIndex++);
genConsumeReg(putArgRegNode);
// Validate the putArgRegNode has the right type.
assert(varTypeUsesFloatReg(putArgRegNode->TypeGet()) == genIsValidFloatReg(argReg));
inst_Mov_Extend(putArgRegNode->TypeGet(), /* srcInReg */ false, argReg, putArgRegNode->GetRegNum(),
/* canSkip */ true, emitActualTypeSize(TYP_I_IMPL));
}
}
else
#endif // UNIX_AMD64_ABI
{
regNumber argReg = curArgTabEntry->GetRegNum();
genConsumeReg(argNode);
inst_Mov_Extend(argNode->TypeGet(), /* srcInReg */ false, argReg, argNode->GetRegNum(), /* canSkip */ true,
emitActualTypeSize(TYP_I_IMPL));
}
// In the case of a varargs call,
// the ABI dictates that if we have floating point args,
// we must pass the enregistered arguments in both the
// integer and floating point registers, so let's do that.
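// For example (assuming the win-x64 varargs convention): a double already loaded into XMM1 is
// also copied into RDX, the integer register that getCallArgIntRegister pairs with it.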
if (compFeatureVarArg() && call->IsVarargs() && varTypeIsFloating(argNode))
{
regNumber srcReg = argNode->GetRegNum();
regNumber targetReg = compiler->getCallArgIntRegister(argNode->GetRegNum());
inst_Mov(TYP_LONG, targetReg, srcReg, /* canSkip */ false, emitActualTypeSize(TYP_I_IMPL));
}
}
#if defined(TARGET_X86) || defined(UNIX_AMD64_ABI)
// The call will pop its arguments.
// for each putarg_stk:
target_ssize_t stackArgBytes = 0;
for (GenTreeCall::Use& use : call->Args())
{
GenTree* arg = use.GetNode();
if (arg->OperIs(GT_PUTARG_STK) && ((arg->gtFlags & GTF_LATE_ARG) == 0))
{
GenTree* source = arg->AsPutArgStk()->gtGetOp1();
unsigned size = arg->AsPutArgStk()->GetStackByteSize();
stackArgBytes += size;
#ifdef DEBUG
fgArgTabEntry* curArgTabEntry = compiler->gtArgEntryByNode(call, arg);
assert(curArgTabEntry != nullptr);
assert(size == (curArgTabEntry->numSlots * TARGET_POINTER_SIZE));
#ifdef FEATURE_PUT_STRUCT_ARG_STK
if (!source->OperIs(GT_FIELD_LIST) && (source->TypeGet() == TYP_STRUCT))
{
GenTreeObj* obj = source->AsObj();
unsigned argBytes = roundUp(obj->GetLayout()->GetSize(), TARGET_POINTER_SIZE);
#ifdef TARGET_X86
// If we have an OBJ, we must have created a copy if the original arg was not a
// local and was not a multiple of TARGET_POINTER_SIZE.
// Note that on x64/ux this will be handled by unrolling in genStructPutArgUnroll.
assert((argBytes == obj->GetLayout()->GetSize()) || obj->Addr()->IsLocalAddrExpr());
#endif // TARGET_X86
assert((curArgTabEntry->numSlots * TARGET_POINTER_SIZE) == argBytes);
}
#endif // FEATURE_PUT_STRUCT_ARG_STK
#endif // DEBUG
}
}
#endif // defined(TARGET_X86) || defined(UNIX_AMD64_ABI)
// Insert a null check on "this" pointer if asked.
if (call->NeedsNullCheck())
{
const regNumber regThis = genGetThisArgReg(call);
GetEmitter()->emitIns_AR_R(INS_cmp, EA_4BYTE, regThis, regThis, 0);
}
// If fast tail call, then we are done here, we just have to load the call
// target into the right registers. We ensure in RA that the registers used
// for the target (e.g. contained indir) are loaded into volatile registers
// that won't be restored by epilog sequence.
if (call->IsFastTailCall())
{
GenTree* target = getCallTarget(call, nullptr);
if (target != nullptr)
{
if (target->isContainedIndir())
{
genConsumeAddress(target->AsIndir()->Addr());
}
else
{
assert(!target->isContained());
genConsumeReg(target);
}
}
return;
}
// For a pinvoke to unmanaged code we emit a label to clear
// the GC pointer state before the callsite.
// We can't utilize the typical lazy killing of GC pointers
// at (or inside) the callsite.
if (compiler->killGCRefs(call))
{
genDefineTempLabel(genCreateTempLabel());
}
#if defined(DEBUG) && defined(TARGET_X86)
// Store the stack pointer so we can check it after the call.
if (compiler->opts.compStackCheckOnCall && call->gtCallType == CT_USER_FUNC)
{
noway_assert(compiler->lvaCallSpCheck != 0xCCCCCCCC &&
compiler->lvaGetDesc(compiler->lvaCallSpCheck)->lvDoNotEnregister &&
compiler->lvaGetDesc(compiler->lvaCallSpCheck)->lvOnFrame);
GetEmitter()->emitIns_S_R(ins_Store(TYP_I_IMPL), EA_PTRSIZE, REG_SPBASE, compiler->lvaCallSpCheck, 0);
}
#endif // defined(DEBUG) && defined(TARGET_X86)
// When it's a PInvoke call and the call type is USER function, we issue VZEROUPPER here
// if the function contains 256bit AVX instructions, this is to avoid AVX-256 to Legacy SSE
// transition penalty, assuming the user function contains legacy SSE instruction.
// To limit the code size impact, we only issue VZEROUPPER before the PInvoke call; we don't issue
// VZEROUPPER after the PInvoke call because the transition penalty from legacy SSE to AVX only happens
// when there's a preceding 256-bit AVX to legacy SSE transition.
if (call->IsPInvoke() && (call->gtCallType == CT_USER_FUNC) && GetEmitter()->Contains256bitAVX())
{
assert(compiler->canUseVexEncoding());
instGen(INS_vzeroupper);
}
genCallInstruction(call X86_ARG(stackArgBytes));
// for pinvoke/intrinsic/tailcalls we may have needed to get the address of
// a label. In case it is indirect with CFG enabled make sure we do not get
// the address after the validation but only after the actual call that
// comes after.
if (genPendingCallLabel && !call->IsHelperCall(compiler, CORINFO_HELP_VALIDATE_INDIRECT_CALL))
{
genDefineInlineTempLabel(genPendingCallLabel);
genPendingCallLabel = nullptr;
}
#ifdef DEBUG
// We should not have GC pointers in killed registers live around the call.
// GC info for arg registers were cleared when consuming arg nodes above
// and LSRA should ensure it for other trashed registers.
regMaskTP killMask = RBM_CALLEE_TRASH;
if (call->IsHelperCall())
{
CorInfoHelpFunc helpFunc = compiler->eeGetHelperNum(call->gtCallMethHnd);
killMask = compiler->compHelperCallKillSet(helpFunc);
}
assert((gcInfo.gcRegGCrefSetCur & killMask) == 0);
assert((gcInfo.gcRegByrefSetCur & killMask) == 0);
#endif
var_types returnType = call->TypeGet();
if (returnType != TYP_VOID)
{
#ifdef TARGET_X86
if (varTypeIsFloating(returnType))
{
// Spill the value from the fp stack.
// Then, load it into the target register.
call->gtFlags |= GTF_SPILL;
regSet.rsSpillFPStack(call);
call->gtFlags |= GTF_SPILLED;
call->gtFlags &= ~GTF_SPILL;
}
else
#endif // TARGET_X86
{
regNumber returnReg;
if (call->HasMultiRegRetVal())
{
const ReturnTypeDesc* retTypeDesc = call->GetReturnTypeDesc();
assert(retTypeDesc != nullptr);
const unsigned regCount = retTypeDesc->GetReturnRegCount();
// If regs allocated to call node are different from ABI return
// regs in which the call has returned its result, move the result
// to regs allocated to call node.
for (unsigned i = 0; i < regCount; ++i)
{
var_types regType = retTypeDesc->GetReturnRegType(i);
returnReg = retTypeDesc->GetABIReturnReg(i);
regNumber allocatedReg = call->GetRegNumByIdx(i);
inst_Mov(regType, allocatedReg, returnReg, /* canSkip */ true);
}
#ifdef FEATURE_SIMD
// A Vector3 return value is stored in xmm0 and xmm1.
// RyuJIT assumes that the upper unused bits of xmm1 are cleared but
// the native compiler doesn't guarantee it.
if (call->IsUnmanaged() && (returnType == TYP_SIMD12))
{
returnReg = retTypeDesc->GetABIReturnReg(1);
// Clear the upper 32 bits by two shift instructions.
// retReg = retReg << 96
// retReg = retReg >> 96
GetEmitter()->emitIns_R_I(INS_pslldq, emitActualTypeSize(TYP_SIMD12), returnReg, 12);
GetEmitter()->emitIns_R_I(INS_psrldq, emitActualTypeSize(TYP_SIMD12), returnReg, 12);
}
#endif // FEATURE_SIMD
}
else
{
#ifdef TARGET_X86
if (call->IsHelperCall(compiler, CORINFO_HELP_INIT_PINVOKE_FRAME))
{
// The x86 CORINFO_HELP_INIT_PINVOKE_FRAME helper uses a custom calling convention that returns with
// TCB in REG_PINVOKE_TCB. AMD64/ARM64 use the standard calling convention. fgMorphCall() sets the
// correct argument registers.
returnReg = REG_PINVOKE_TCB;
}
else
#endif // TARGET_X86
if (varTypeIsFloating(returnType))
{
returnReg = REG_FLOATRET;
}
else
{
returnReg = REG_INTRET;
}
inst_Mov(returnType, call->GetRegNum(), returnReg, /* canSkip */ true);
}
genProduceReg(call);
}
}
// If there is nothing next, that means the result is thrown away, so this value is not live.
// However, for minopts or debuggable code, we keep it live to support managed return value debugging.
if ((call->gtNext == nullptr) && compiler->opts.OptimizationEnabled())
{
gcInfo.gcMarkRegSetNpt(RBM_INTRET);
}
#if defined(DEBUG) && defined(TARGET_X86)
if (compiler->opts.compStackCheckOnCall && call->gtCallType == CT_USER_FUNC)
{
noway_assert(compiler->lvaCallSpCheck != 0xCCCCCCCC &&
compiler->lvaGetDesc(compiler->lvaCallSpCheck)->lvDoNotEnregister &&
compiler->lvaGetDesc(compiler->lvaCallSpCheck)->lvOnFrame);
if (!call->CallerPop() && (stackArgBytes != 0))
{
// ECX is trashed, so can be used to compute the expected SP. We saved the value of SP
// after pushing all the stack arguments, but the callee popped the arguments, so we need
// to do some math to figure a good comparison.
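// Sketch of the emitted check (x86, where REG_ARG_0 is ECX):
//      mov     ecx, esp
//      sub     ecx, stackArgBytes
//      cmp     [lvaCallSpCheck], ecx
// followed by the je/breakpoint sequence below.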
GetEmitter()->emitIns_Mov(INS_mov, EA_4BYTE, REG_ARG_0, REG_SPBASE, /* canSkip */ false);
GetEmitter()->emitIns_R_I(INS_sub, EA_4BYTE, REG_ARG_0, stackArgBytes);
GetEmitter()->emitIns_S_R(INS_cmp, EA_4BYTE, REG_ARG_0, compiler->lvaCallSpCheck, 0);
}
else
{
GetEmitter()->emitIns_S_R(INS_cmp, EA_4BYTE, REG_SPBASE, compiler->lvaCallSpCheck, 0);
}
BasicBlock* sp_check = genCreateTempLabel();
GetEmitter()->emitIns_J(INS_je, sp_check);
instGen(INS_BREAKPOINT);
genDefineTempLabel(sp_check);
}
#endif // defined(DEBUG) && defined(TARGET_X86)
#if !defined(FEATURE_EH_FUNCLETS)
//-------------------------------------------------------------------------
// Create a label for tracking of region protected by the monitor in synchronized methods.
// This needs to be here, rather than above where fPossibleSyncHelperCall is set,
// so the GC state vars have been updated before creating the label.
if ((call->gtCallType == CT_HELPER) && (compiler->info.compFlags & CORINFO_FLG_SYNCH))
{
CorInfoHelpFunc helperNum = compiler->eeGetHelperNum(call->gtCallMethHnd);
noway_assert(helperNum != CORINFO_HELP_UNDEF);
switch (helperNum)
{
case CORINFO_HELP_MON_ENTER:
case CORINFO_HELP_MON_ENTER_STATIC:
noway_assert(compiler->syncStartEmitCookie == NULL);
compiler->syncStartEmitCookie =
GetEmitter()->emitAddLabel(gcInfo.gcVarPtrSetCur, gcInfo.gcRegGCrefSetCur, gcInfo.gcRegByrefSetCur);
noway_assert(compiler->syncStartEmitCookie != NULL);
break;
case CORINFO_HELP_MON_EXIT:
case CORINFO_HELP_MON_EXIT_STATIC:
noway_assert(compiler->syncEndEmitCookie == NULL);
compiler->syncEndEmitCookie =
GetEmitter()->emitAddLabel(gcInfo.gcVarPtrSetCur, gcInfo.gcRegGCrefSetCur, gcInfo.gcRegByrefSetCur);
noway_assert(compiler->syncEndEmitCookie != NULL);
break;
default:
break;
}
}
#endif // !FEATURE_EH_FUNCLETS
unsigned stackAdjustBias = 0;
#if defined(TARGET_X86)
// Is the caller supposed to pop the arguments?
if (call->CallerPop() && (stackArgBytes != 0))
{
stackAdjustBias = stackArgBytes;
}
SubtractStackLevel(stackArgBytes);
#endif // TARGET_X86
genRemoveAlignmentAfterCall(call, stackAdjustBias);
}
//------------------------------------------------------------------------
// genCallInstruction - Generate instructions necessary to transfer control to the call.
//
// Arguments:
// call - the GT_CALL node
//
// Remarks:
// For tailcalls this function will generate a jump.
//
void CodeGen::genCallInstruction(GenTreeCall* call X86_ARG(target_ssize_t stackArgBytes))
{
#if defined(TARGET_X86)
// If the callee pops the arguments, we pass a positive value as the argSize, and the emitter will
// adjust its stack level accordingly.
// If the caller needs to explicitly pop its arguments, we must pass a negative value, and then do the
// pop when we're done.
target_ssize_t argSizeForEmitter = stackArgBytes;
if (call->CallerPop())
{
argSizeForEmitter = -stackArgBytes;
}
#endif // defined(TARGET_X86)
// Determine return value size(s).
const ReturnTypeDesc* retTypeDesc = call->GetReturnTypeDesc();
emitAttr retSize = EA_PTRSIZE;
emitAttr secondRetSize = EA_UNKNOWN;
if (call->HasMultiRegRetVal())
{
retSize = emitTypeSize(retTypeDesc->GetReturnRegType(0));
secondRetSize = emitTypeSize(retTypeDesc->GetReturnRegType(1));
}
else
{
assert(!varTypeIsStruct(call));
if (call->gtType == TYP_REF)
{
retSize = EA_GCREF;
}
else if (call->gtType == TYP_BYREF)
{
retSize = EA_BYREF;
}
}
// We need to propagate the IL offset information to the call instruction, so we can emit
// an IL to native mapping record for the call, to support managed return value debugging.
// We don't want tail call helper calls that were converted from normal calls to get a record,
// so we skip this hash table lookup logic in that case.
DebugInfo di;
if (compiler->opts.compDbgInfo && compiler->genCallSite2DebugInfoMap != nullptr && !call->IsTailCall())
{
(void)compiler->genCallSite2DebugInfoMap->Lookup(call, &di);
}
CORINFO_SIG_INFO* sigInfo = nullptr;
#ifdef DEBUG
// Pass the call signature information down into the emitter so the emitter can associate
// native call sites with the signatures they were generated from.
if (call->gtCallType != CT_HELPER)
{
sigInfo = call->callSig;
}
#endif // DEBUG
CORINFO_METHOD_HANDLE methHnd;
GenTree* target = getCallTarget(call, &methHnd);
if (target != nullptr)
{
#ifdef TARGET_X86
if (call->IsVirtualStub() && (call->gtCallType == CT_INDIRECT))
{
// On x86, we need to generate a very specific pattern for indirect VSD calls:
//
// 3-byte nop
// call dword ptr [eax]
//
// Where EAX is also used as an argument to the stub dispatch helper. Make
// sure that the call target address is computed into EAX in this case.
assert(compiler->virtualStubParamInfo->GetReg() == REG_VIRTUAL_STUB_TARGET);
assert(target->isContainedIndir());
assert(target->OperGet() == GT_IND);
GenTree* addr = target->AsIndir()->Addr();
assert(addr->isUsedFromReg());
genConsumeReg(addr);
genCopyRegIfNeeded(addr, REG_VIRTUAL_STUB_TARGET);
GetEmitter()->emitIns_Nop(3);
// clang-format off
GetEmitter()->emitIns_Call(emitter::EC_INDIR_ARD,
methHnd,
INDEBUG_LDISASM_COMMA(sigInfo)
nullptr,
argSizeForEmitter,
retSize
MULTIREG_HAS_SECOND_GC_RET_ONLY_ARG(secondRetSize),
gcInfo.gcVarPtrSetCur,
gcInfo.gcRegGCrefSetCur,
gcInfo.gcRegByrefSetCur,
di, REG_VIRTUAL_STUB_TARGET, REG_NA, 1, 0);
// clang-format on
}
else
#endif
if (target->isContainedIndir())
{
// When CFG is enabled we should not be emitting any non-register indirect calls.
assert(!compiler->opts.IsCFGEnabled() ||
call->IsHelperCall(compiler, CORINFO_HELP_VALIDATE_INDIRECT_CALL) ||
call->IsHelperCall(compiler, CORINFO_HELP_DISPATCH_INDIRECT_CALL));
if (target->AsIndir()->HasBase() && target->AsIndir()->Base()->isContainedIntOrIImmed())
{
// Note that if gtControlExpr is an indir of an absolute address, we mark it as
// contained only if it can be encoded as PC-relative offset.
assert(target->AsIndir()->Base()->AsIntConCommon()->FitsInAddrBase(compiler));
// clang-format off
genEmitCall(emitter::EC_FUNC_TOKEN_INDIR,
methHnd,
INDEBUG_LDISASM_COMMA(sigInfo)
(void*) target->AsIndir()->Base()->AsIntConCommon()->IconValue()
X86_ARG(argSizeForEmitter),
retSize
MULTIREG_HAS_SECOND_GC_RET_ONLY_ARG(secondRetSize),
di,
REG_NA,
call->IsFastTailCall());
// clang-format on
}
else
{
// For fast tailcalls this is happening in epilog, so we should
// have already consumed target in genCall.
if (!call->IsFastTailCall())
{
genConsumeAddress(target->AsIndir()->Addr());
}
// clang-format off
genEmitCallIndir(emitter::EC_INDIR_ARD,
methHnd,
INDEBUG_LDISASM_COMMA(sigInfo)
target->AsIndir()
X86_ARG(argSizeForEmitter),
retSize
MULTIREG_HAS_SECOND_GC_RET_ONLY_ARG(secondRetSize),
di,
call->IsFastTailCall());
// clang-format on
}
}
else
{
// We have already generated code for gtControlExpr evaluating it into a register.
// We just need to emit "call reg" in this case.
assert(genIsValidIntReg(target->GetRegNum()));
// For fast tailcalls this is happening in epilog, so we should
// have already consumed target in genCall.
if (!call->IsFastTailCall())
{
genConsumeReg(target);
}
// clang-format off
genEmitCall(emitter::EC_INDIR_R,
methHnd,
INDEBUG_LDISASM_COMMA(sigInfo)
nullptr // addr
X86_ARG(argSizeForEmitter),
retSize
MULTIREG_HAS_SECOND_GC_RET_ONLY_ARG(secondRetSize),
di,
target->GetRegNum(),
call->IsFastTailCall());
// clang-format on
}
}
else
{
// If we have no target and this is a call with indirection cell
// then emit call through that indir cell. This means we generate e.g.
// lea r11, [addr of cell]
// call [r11]
// which is more efficient than
// lea r11, [addr of cell]
// call [addr of cell]
regNumber indirCellReg = getCallIndirectionCellReg(call);
if (indirCellReg != REG_NA)
{
// clang-format off
GetEmitter()->emitIns_Call(
emitter::EC_INDIR_ARD,
methHnd,
INDEBUG_LDISASM_COMMA(sigInfo)
nullptr,
0,
retSize
MULTIREG_HAS_SECOND_GC_RET_ONLY_ARG(secondRetSize),
gcInfo.gcVarPtrSetCur,
gcInfo.gcRegGCrefSetCur,
gcInfo.gcRegByrefSetCur,
di, indirCellReg, REG_NA, 0, 0,
call->IsFastTailCall());
// clang-format on
}
#ifdef FEATURE_READYTORUN
else if (call->gtEntryPoint.addr != nullptr)
{
emitter::EmitCallType type =
(call->gtEntryPoint.accessType == IAT_VALUE) ? emitter::EC_FUNC_TOKEN : emitter::EC_FUNC_TOKEN_INDIR;
// clang-format off
genEmitCall(type,
methHnd,
INDEBUG_LDISASM_COMMA(sigInfo)
(void*)call->gtEntryPoint.addr
X86_ARG(argSizeForEmitter),
retSize
MULTIREG_HAS_SECOND_GC_RET_ONLY_ARG(secondRetSize),
di,
REG_NA,
call->IsFastTailCall());
// clang-format on
}
#endif
else
{
// Generate a direct call to a non-virtual user defined or helper method
assert(call->gtCallType == CT_HELPER || call->gtCallType == CT_USER_FUNC);
void* addr = nullptr;
if (call->gtCallType == CT_HELPER)
{
// Direct call to a helper method.
CorInfoHelpFunc helperNum = compiler->eeGetHelperNum(methHnd);
noway_assert(helperNum != CORINFO_HELP_UNDEF);
void* pAddr = nullptr;
addr = compiler->compGetHelperFtn(helperNum, (void**)&pAddr);
assert(pAddr == nullptr);
}
else
{
// Direct call to a non-virtual user function.
addr = call->gtDirectCallAddress;
}
assert(addr != nullptr);
// Non-virtual direct calls to known addresses
// clang-format off
genEmitCall(emitter::EC_FUNC_TOKEN,
methHnd,
INDEBUG_LDISASM_COMMA(sigInfo)
addr
X86_ARG(argSizeForEmitter),
retSize
MULTIREG_HAS_SECOND_GC_RET_ONLY_ARG(secondRetSize),
di,
REG_NA,
call->IsFastTailCall());
// clang-format on
}
}
}
// Produce code for a GT_JMP node.
// The arguments of the caller need to be transferred to the callee before exiting the caller.
// The actual jump to callee is generated as part of caller epilog sequence.
// Therefore the codegen of GT_JMP is to ensure that the callee arguments are correctly setup.
void CodeGen::genJmpMethod(GenTree* jmp)
{
assert(jmp->OperGet() == GT_JMP);
assert(compiler->compJmpOpUsed);
// If no arguments, nothing to do
if (compiler->info.compArgsCount == 0)
{
return;
}
// Make sure register arguments are in their initial registers
// and stack arguments are put back as well.
unsigned varNum;
LclVarDsc* varDsc;
// First, move any enregistered stack arguments back to the stack.
// At the same time, any reg arg not in its correct register is moved back to its stack location.
//
// We are not strictly required to spill reg args that are not in the desired reg for a jmp call
// But that would require us to deal with circularity while moving values around. Spilling
// to stack makes the implementation simple, which is not a bad trade off given Jmp calls
// are not frequent.
for (varNum = 0; varNum < compiler->info.compArgsCount; varNum++)
{
varDsc = compiler->lvaGetDesc(varNum);
if (varDsc->lvPromoted)
{
noway_assert(varDsc->lvFieldCnt == 1); // We only handle one field here
unsigned fieldVarNum = varDsc->lvFieldLclStart;
varDsc = compiler->lvaGetDesc(fieldVarNum);
}
noway_assert(varDsc->lvIsParam);
if (varDsc->lvIsRegArg && (varDsc->GetRegNum() != REG_STK))
{
// Skip reg args which are already in their right registers for the jmp call.
// If not, we will spill such args to their stack locations.
//
// If we need to generate a tail call profiler hook, then spill all
// arg regs to free them up for the callback.
if (!compiler->compIsProfilerHookNeeded() && (varDsc->GetRegNum() == varDsc->GetArgReg()))
{
continue;
}
}
else if (varDsc->GetRegNum() == REG_STK)
{
// Skip args which are currently living in stack.
continue;
}
// If we came here it means either a reg argument not in the right register or
// a stack argument currently living in a register. In either case the following
// assert should hold.
assert(varDsc->GetRegNum() != REG_STK);
assert(!varDsc->lvIsStructField || (compiler->lvaGetDesc(varDsc->lvParentLcl)->lvFieldCnt == 1));
var_types storeType = varDsc->GetActualRegisterType(); // We own the memory and can use the full move.
GetEmitter()->emitIns_S_R(ins_Store(storeType), emitTypeSize(storeType), varDsc->GetRegNum(), varNum, 0);
// Update lvRegNum life and GC info to indicate lvRegNum is dead and varDsc stack slot is going live.
// Note that we cannot modify varDsc->GetRegNum() here because another basic block may not be expecting it.
// Therefore manually update life of varDsc->GetRegNum().
regMaskTP tempMask = varDsc->lvRegMask();
regSet.RemoveMaskVars(tempMask);
gcInfo.gcMarkRegSetNpt(tempMask);
if (compiler->lvaIsGCTracked(varDsc))
{
#ifdef DEBUG
if (!VarSetOps::IsMember(compiler, gcInfo.gcVarPtrSetCur, varDsc->lvVarIndex))
{
JITDUMP("\t\t\t\t\t\t\tVar V%02u becoming live\n", varNum);
}
else
{
JITDUMP("\t\t\t\t\t\t\tVar V%02u continuing live\n", varNum);
}
#endif // DEBUG
VarSetOps::AddElemD(compiler, gcInfo.gcVarPtrSetCur, varDsc->lvVarIndex);
}
}
#ifdef PROFILING_SUPPORTED
// At this point all arg regs are free.
// Emit tail call profiler callback.
genProfilingLeaveCallback(CORINFO_HELP_PROF_FCN_TAILCALL);
#endif
// Next, move any register arguments that are not currently enregistered back into their argument registers.
regMaskTP fixedIntArgMask = RBM_NONE; // tracks the int arg regs occupying fixed args in case of a vararg method.
unsigned firstArgVarNum = BAD_VAR_NUM; // varNum of the first argument in case of a vararg method.
for (varNum = 0; varNum < compiler->info.compArgsCount; varNum++)
{
varDsc = compiler->lvaGetDesc(varNum);
if (varDsc->lvPromoted)
{
noway_assert(varDsc->lvFieldCnt == 1); // We only handle one field here
unsigned fieldVarNum = varDsc->lvFieldLclStart;
varDsc = compiler->lvaGetDesc(fieldVarNum);
}
noway_assert(varDsc->lvIsParam);
// Skip if arg not passed in a register.
if (!varDsc->lvIsRegArg)
{
continue;
}
#if defined(UNIX_AMD64_ABI)
if (varTypeIsStruct(varDsc))
{
CORINFO_CLASS_HANDLE typeHnd = varDsc->GetStructHnd();
assert(typeHnd != nullptr);
SYSTEMV_AMD64_CORINFO_STRUCT_REG_PASSING_DESCRIPTOR structDesc;
compiler->eeGetSystemVAmd64PassStructInRegisterDescriptor(typeHnd, &structDesc);
assert(structDesc.passedInRegisters);
unsigned __int8 offset0 = 0;
unsigned __int8 offset1 = 0;
var_types type0 = TYP_UNKNOWN;
var_types type1 = TYP_UNKNOWN;
// Get the eightbyte data
compiler->GetStructTypeOffset(structDesc, &type0, &type1, &offset0, &offset1);
// Move the values into the right registers.
//
// Update varDsc->GetArgReg() and lvOtherArgReg life and GC Info to indicate varDsc stack slot is dead and
// argReg is going live. Note that we cannot modify varDsc->GetRegNum() and lvOtherArgReg here
// because another basic block may not be expecting it.
// Therefore manually update life of argReg. Note that GT_JMP marks
// the end of the basic block and after which reg life and gc info will be recomputed for the new block in
// genCodeForBBList().
if (type0 != TYP_UNKNOWN)
{
GetEmitter()->emitIns_R_S(ins_Load(type0), emitTypeSize(type0), varDsc->GetArgReg(), varNum, offset0);
regSet.SetMaskVars(regSet.GetMaskVars() | genRegMask(varDsc->GetArgReg()));
gcInfo.gcMarkRegPtrVal(varDsc->GetArgReg(), type0);
}
if (type1 != TYP_UNKNOWN)
{
GetEmitter()->emitIns_R_S(ins_Load(type1), emitTypeSize(type1), varDsc->GetOtherArgReg(), varNum,
offset1);
regSet.SetMaskVars(regSet.GetMaskVars() | genRegMask(varDsc->GetOtherArgReg()));
gcInfo.gcMarkRegPtrVal(varDsc->GetOtherArgReg(), type1);
}
if (varDsc->lvTracked)
{
VarSetOps::RemoveElemD(compiler, gcInfo.gcVarPtrSetCur, varDsc->lvVarIndex);
}
}
else
#endif // !defined(UNIX_AMD64_ABI)
{
// Register argument
CLANG_FORMAT_COMMENT_ANCHOR;
#ifdef TARGET_X86
noway_assert(
isRegParamType(genActualType(varDsc->TypeGet())) ||
(varTypeIsStruct(varDsc->TypeGet()) && compiler->isTrivialPointerSizedStruct(varDsc->GetStructHnd())));
#else
noway_assert(isRegParamType(genActualType(varDsc->TypeGet())));
#endif // TARGET_X86
// Is register argument already in the right register?
// If not load it from its stack location.
var_types loadType = varDsc->GetRegisterType();
#ifdef TARGET_X86
if (varTypeIsStruct(varDsc->TypeGet()))
{
// Treat trivial pointer-sized structs as a pointer sized primitive
// for the purposes of registers.
loadType = TYP_I_IMPL;
}
#endif
regNumber argReg = varDsc->GetArgReg(); // incoming arg register
if (varDsc->GetRegNum() != argReg)
{
assert(genIsValidReg(argReg));
GetEmitter()->emitIns_R_S(ins_Load(loadType), emitTypeSize(loadType), argReg, varNum, 0);
// Update argReg life and GC Info to indicate varDsc stack slot is dead and argReg is going live.
// Note that we cannot modify varDsc->GetRegNum() here because another basic block may not be
// expecting it. Therefore manually update life of argReg. Note that GT_JMP marks the end of the
// basic block and after which reg life and gc info will be recomputed for the new block in
// genCodeForBBList().
regSet.AddMaskVars(genRegMask(argReg));
gcInfo.gcMarkRegPtrVal(argReg, loadType);
if (compiler->lvaIsGCTracked(varDsc))
{
#ifdef DEBUG
if (VarSetOps::IsMember(compiler, gcInfo.gcVarPtrSetCur, varDsc->lvVarIndex))
{
JITDUMP("\t\t\t\t\t\t\tVar V%02u becoming dead\n", varNum);
}
else
{
JITDUMP("\t\t\t\t\t\t\tVar V%02u continuing dead\n", varNum);
}
#endif // DEBUG
VarSetOps::RemoveElemD(compiler, gcInfo.gcVarPtrSetCur, varDsc->lvVarIndex);
}
}
}
#if defined(TARGET_AMD64)
// In case of a jmp call to a vararg method also pass the float/double arg in the corresponding int arg
// register. This is due to the AMD64 ABI which requires floating point values passed to varargs functions to
// be passed in both integer and floating point registers. It doesn't apply to x86, which passes floating point
// values on the stack.
if (compFeatureVarArg() && compiler->info.compIsVarArgs)
{
regNumber intArgReg;
var_types loadType = varDsc->GetRegisterType();
regNumber argReg = varDsc->GetArgReg(); // incoming arg register
if (varTypeIsFloating(loadType))
{
intArgReg = compiler->getCallArgIntRegister(argReg);
inst_Mov(TYP_LONG, intArgReg, argReg, /* canSkip */ false, emitActualTypeSize(loadType));
}
else
{
intArgReg = argReg;
}
fixedIntArgMask |= genRegMask(intArgReg);
if (intArgReg == REG_ARG_0)
{
assert(firstArgVarNum == BAD_VAR_NUM);
firstArgVarNum = varNum;
}
}
#endif // TARGET_AMD64
}
#if defined(TARGET_AMD64)
// Jmp call to a vararg method - if the method has fewer than 4 fixed arguments,
// load the remaining arg registers (both int and float) from the corresponding
// shadow stack slots. This is for the reason that we don't know the number and type
// of non-fixed params passed by the caller, therefore we have to assume the worst case
// of caller passing float/double args both in int and float arg regs.
//
// This doesn't apply to x86, which doesn't pass floating point values in floating
// point registers.
//
// The caller could have passed gc-ref/byref type var args. Since these are var args
// the callee has no way of knowing their gc-ness. Therefore, mark the region that loads
// remaining arg registers from shadow stack slots as non-gc interruptible.
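// Roughly (assuming the win-x64 register pairing), for each remaining fixed arg register this
// emits, inside the non-GC-interruptible region:
//      mov     rdx, [shadow slot of firstArgVarNum]
//      movq    xmm1, rdx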
if (compFeatureVarArg() && fixedIntArgMask != RBM_NONE)
{
assert(compiler->info.compIsVarArgs);
assert(firstArgVarNum != BAD_VAR_NUM);
regMaskTP remainingIntArgMask = RBM_ARG_REGS & ~fixedIntArgMask;
if (remainingIntArgMask != RBM_NONE)
{
GetEmitter()->emitDisableGC();
for (int argNum = 0, argOffset = 0; argNum < MAX_REG_ARG; ++argNum)
{
regNumber argReg = intArgRegs[argNum];
regMaskTP argRegMask = genRegMask(argReg);
if ((remainingIntArgMask & argRegMask) != 0)
{
remainingIntArgMask &= ~argRegMask;
GetEmitter()->emitIns_R_S(INS_mov, EA_8BYTE, argReg, firstArgVarNum, argOffset);
// also load it in corresponding float arg reg
regNumber floatReg = compiler->getCallArgFloatRegister(argReg);
inst_Mov(TYP_DOUBLE, floatReg, argReg, /* canSkip */ false, emitActualTypeSize(TYP_I_IMPL));
}
argOffset += REGSIZE_BYTES;
}
GetEmitter()->emitEnableGC();
}
}
#endif // TARGET_AMD64
}
// produce code for a GT_LEA subnode
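// Depending on which of Base/Index are present this emits one of (hypothetical registers):
//      lea     dst, [base + index*scale + offset]
//      lea     dst, [base + offset]
//      lea     dst, [index*scale + offset]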
void CodeGen::genLeaInstruction(GenTreeAddrMode* lea)
{
emitAttr size = emitTypeSize(lea);
genConsumeOperands(lea);
if (lea->Base() && lea->Index())
{
regNumber baseReg = lea->Base()->GetRegNum();
regNumber indexReg = lea->Index()->GetRegNum();
GetEmitter()->emitIns_R_ARX(INS_lea, size, lea->GetRegNum(), baseReg, indexReg, lea->gtScale, lea->Offset());
}
else if (lea->Base())
{
GetEmitter()->emitIns_R_AR(INS_lea, size, lea->GetRegNum(), lea->Base()->GetRegNum(), lea->Offset());
}
else if (lea->Index())
{
GetEmitter()->emitIns_R_ARX(INS_lea, size, lea->GetRegNum(), REG_NA, lea->Index()->GetRegNum(), lea->gtScale,
lea->Offset());
}
genProduceReg(lea);
}
//------------------------------------------------------------------------
// genCompareFloat: Generate code for comparing two floating point values
//
// Arguments:
// treeNode - the compare tree
//
void CodeGen::genCompareFloat(GenTree* treeNode)
{
assert(treeNode->OperIsCompare());
GenTreeOp* tree = treeNode->AsOp();
GenTree* op1 = tree->gtOp1;
GenTree* op2 = tree->gtOp2;
var_types op1Type = op1->TypeGet();
var_types op2Type = op2->TypeGet();
genConsumeOperands(tree);
assert(varTypeIsFloating(op1Type));
assert(op1Type == op2Type);
regNumber targetReg = treeNode->GetRegNum();
instruction ins;
emitAttr cmpAttr;
GenCondition condition = GenCondition::FromFloatRelop(treeNode);
if (condition.PreferSwap())
{
condition = GenCondition::Swap(condition);
std::swap(op1, op2);
}
ins = (op1Type == TYP_FLOAT) ? INS_ucomiss : INS_ucomisd;
cmpAttr = emitTypeSize(op1Type);
GetEmitter()->emitInsBinary(ins, cmpAttr, op1, op2);
// Are we evaluating this into a register?
if (targetReg != REG_NA)
{
if ((condition.GetCode() == GenCondition::FNEU) && (op1->GetRegNum() == op2->GetRegNum()))
{
// For floating point, `x != x` is a common way of
// checking for NaN. So, in the case where both
// operands are the same, we can optimize codegen
// to only do a single check.
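// For example (illustrative registers), "x != x" with x in xmm0 reduces to roughly:
//   ucomiss xmm0, xmm0
//   setp    al          ; PF is set only for the unordered (NaN) case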
condition = GenCondition(GenCondition::P);
}
inst_SETCC(condition, treeNode->TypeGet(), targetReg);
genProduceReg(tree);
}
}
//------------------------------------------------------------------------
// genCompareInt: Generate code for comparing ints or, on amd64, longs.
//
// Arguments:
// treeNode - the compare tree
//
// Return Value:
// None.
void CodeGen::genCompareInt(GenTree* treeNode)
{
assert(treeNode->OperIsCompare() || treeNode->OperIs(GT_CMP));
GenTreeOp* tree = treeNode->AsOp();
GenTree* op1 = tree->gtOp1;
GenTree* op2 = tree->gtOp2;
var_types op1Type = op1->TypeGet();
var_types op2Type = op2->TypeGet();
regNumber targetReg = tree->GetRegNum();
emitter* emit = GetEmitter();
bool canReuseFlags = false;
genConsumeOperands(tree);
assert(!op1->isContainedIntOrIImmed());
assert(!varTypeIsFloating(op2Type));
instruction ins;
var_types type = TYP_UNKNOWN;
if (tree->OperIs(GT_TEST_EQ, GT_TEST_NE))
{
ins = INS_test;
// Unlike many xarch instructions TEST doesn't have a form with a 16/32/64 bit first operand and
// an 8 bit immediate second operand. But if the immediate value fits in 8 bits then we can simply
// emit an 8 bit TEST instruction, unless we're targeting x86 and the first operand is a non-byteable
// register.
// Note that lowering does something similar but its main purpose is to allow memory operands to be
// contained so it doesn't handle other kinds of operands. It could do more but on x86 that results
// in additional register constraints and that may be worse than wasting 3 bytes on an immediate.
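// For example: "test eax, 0x8" can be emitted as the shorter "test al, 0x8", saving three
// bytes of immediate; on x86 this is skipped for registers such as esi/edi that have no
// byte-sized form.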
if (
#ifdef TARGET_X86
(!op1->isUsedFromReg() || isByteReg(op1->GetRegNum())) &&
#endif
(op2->IsCnsIntOrI() && FitsIn<uint8_t>(op2->AsIntCon()->IconValue())))
{
type = TYP_UBYTE;
}
}
else if (op1->isUsedFromReg() && op2->IsIntegralConst(0))
{
if (compiler->opts.OptimizationEnabled())
{
emitAttr op1Size = emitActualTypeSize(op1->TypeGet());
assert((int)op1Size >= 4);
// Optimize "x<0" and "x>=0" to "x>>31" if "x" is not a jump condition and in a reg.
// Morph/Lowering are responsible to rotate "0<x" to "x>0" so we won't handle it here.
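// For example (illustrative registers), with a 32-bit "x" in ecx and the result in eax:
//   x <  0  ->  mov eax, ecx
//               shr eax, 31
//   x >= 0  ->  mov eax, ecx
//               not eax
//               shr eax, 31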
if ((targetReg != REG_NA) && tree->OperIs(GT_LT, GT_GE) && !tree->IsUnsigned())
{
inst_Mov(op1->TypeGet(), targetReg, op1->GetRegNum(), /* canSkip */ true);
if (tree->OperIs(GT_GE))
{
// emit "not" for "x>=0" case
inst_RV(INS_not, targetReg, op1->TypeGet());
}
inst_RV_IV(INS_shr_N, targetReg, (int)op1Size * 8 - 1, op1Size);
genProduceReg(tree);
return;
}
canReuseFlags = true;
}
// We're comparing a register to 0 so we can generate "test reg1, reg1"
// instead of the longer "cmp reg1, 0"
ins = INS_test;
op2 = op1;
}
else
{
ins = INS_cmp;
}
if (type == TYP_UNKNOWN)
{
if (op1Type == op2Type)
{
type = op1Type;
}
else if (genTypeSize(op1Type) == genTypeSize(op2Type))
{
// If the types are different but have the same size then we'll use TYP_INT or TYP_LONG.
// This primarily deals with small type mixes (e.g. byte/ubyte) that need to be widened
// and compared as int. We should not get long type mixes here but handle that as well
// just in case.
type = genTypeSize(op1Type) == 8 ? TYP_LONG : TYP_INT;
}
else
{
// If the types are different simply use TYP_INT. This deals with small type/int type
// mixes (e.g. byte/short ubyte/int) that need to be widened and compared as int.
// Lowering is expected to handle any mixes that involve long types (e.g. int/long).
type = TYP_INT;
}
// The common type cannot be smaller than any of the operand types, we're probably mixing int/long
assert(genTypeSize(type) >= max(genTypeSize(op1Type), genTypeSize(op2Type)));
// Small unsigned int types (TYP_BOOL can use anything) should use unsigned comparisons
assert(!(varTypeIsSmallInt(type) && varTypeIsUnsigned(type)) || ((tree->gtFlags & GTF_UNSIGNED) != 0));
// If op1 is smaller then it cannot be in memory, we're probably missing a cast
assert((genTypeSize(op1Type) >= genTypeSize(type)) || !op1->isUsedFromMemory());
// If op2 is smaller then it cannot be in memory, we're probably missing a cast
assert((genTypeSize(op2Type) >= genTypeSize(type)) || !op2->isUsedFromMemory());
// If we ended up with a small type and op2 is a constant then make sure we don't lose constant bits
assert(!op2->IsCnsIntOrI() || !varTypeIsSmall(type) || FitsIn(type, op2->AsIntCon()->IconValue()));
}
// The type cannot be larger than the machine word size
assert(genTypeSize(type) <= genTypeSize(TYP_I_IMPL));
// TYP_UINT and TYP_ULONG should not appear here, only small types can be unsigned
assert(!varTypeIsUnsigned(type) || varTypeIsSmall(type));
// Sign jump optimization should only be set by the following check
assert((tree->gtFlags & GTF_RELOP_SJUMP_OPT) == 0);
if (canReuseFlags && emit->AreFlagsSetToZeroCmp(op1->GetRegNum(), emitTypeSize(type), tree->OperGet()))
{
JITDUMP("Not emitting compare due to flags being already set\n");
}
else if (canReuseFlags && emit->AreFlagsSetForSignJumpOpt(op1->GetRegNum(), emitTypeSize(type), tree))
{
JITDUMP("Not emitting compare due to sign being already set, follow up instr will transform jump\n");
tree->gtFlags |= GTF_RELOP_SJUMP_OPT;
}
else
{
emit->emitInsBinary(ins, emitTypeSize(type), op1, op2);
}
// Are we evaluating this into a register?
if (targetReg != REG_NA)
{
inst_SETCC(GenCondition::FromIntegralRelop(tree), tree->TypeGet(), targetReg);
genProduceReg(tree);
}
}
#if !defined(TARGET_64BIT)
//------------------------------------------------------------------------
// genLongToIntCast: Generate code for long to int casts on x86.
//
// Arguments:
// cast - The GT_CAST node
//
// Return Value:
// None.
//
// Assumptions:
// The cast node and its sources (via GT_LONG) must have been assigned registers.
// The destination cannot be a floating point type or a small integer type.
//
void CodeGen::genLongToIntCast(GenTree* cast)
{
assert(cast->OperGet() == GT_CAST);
GenTree* src = cast->gtGetOp1();
noway_assert(src->OperGet() == GT_LONG);
genConsumeRegs(src);
var_types srcType = ((cast->gtFlags & GTF_UNSIGNED) != 0) ? TYP_ULONG : TYP_LONG;
var_types dstType = cast->CastToType();
regNumber loSrcReg = src->gtGetOp1()->GetRegNum();
regNumber hiSrcReg = src->gtGetOp2()->GetRegNum();
regNumber dstReg = cast->GetRegNum();
assert((dstType == TYP_INT) || (dstType == TYP_UINT));
assert(genIsValidIntReg(loSrcReg));
assert(genIsValidIntReg(hiSrcReg));
assert(genIsValidIntReg(dstReg));
if (cast->gtOverflow())
{
//
// Generate an overflow check for [u]long to [u]int casts:
//
// long -> int - check if the upper 33 bits are all 0 or all 1
//
// ulong -> int - check if the upper 33 bits are all 0
//
// long -> uint - check if the upper 32 bits are all 0
// ulong -> uint - check if the upper 32 bits are all 0
//
if ((srcType == TYP_LONG) && (dstType == TYP_INT))
{
BasicBlock* allOne = genCreateTempLabel();
BasicBlock* success = genCreateTempLabel();
inst_RV_RV(INS_test, loSrcReg, loSrcReg, TYP_INT, EA_4BYTE);
inst_JMP(EJ_js, allOne);
inst_RV_RV(INS_test, hiSrcReg, hiSrcReg, TYP_INT, EA_4BYTE);
genJumpToThrowHlpBlk(EJ_jne, SCK_OVERFLOW);
inst_JMP(EJ_jmp, success);
genDefineTempLabel(allOne);
inst_RV_IV(INS_cmp, hiSrcReg, -1, EA_4BYTE);
genJumpToThrowHlpBlk(EJ_jne, SCK_OVERFLOW);
genDefineTempLabel(success);
}
else
{
if ((srcType == TYP_ULONG) && (dstType == TYP_INT))
{
inst_RV_RV(INS_test, loSrcReg, loSrcReg, TYP_INT, EA_4BYTE);
genJumpToThrowHlpBlk(EJ_js, SCK_OVERFLOW);
}
inst_RV_RV(INS_test, hiSrcReg, hiSrcReg, TYP_INT, EA_4BYTE);
genJumpToThrowHlpBlk(EJ_jne, SCK_OVERFLOW);
}
}
inst_Mov(TYP_INT, dstReg, loSrcReg, /* canSkip */ true);
genProduceReg(cast);
}
#endif
//------------------------------------------------------------------------
// genIntCastOverflowCheck: Generate overflow checking code for an integer cast.
//
// Arguments:
// cast - The GT_CAST node
// desc - The cast description
// reg - The register containing the value to check
//
void CodeGen::genIntCastOverflowCheck(GenTreeCast* cast, const GenIntCastDesc& desc, regNumber reg)
{
switch (desc.CheckKind())
{
case GenIntCastDesc::CHECK_POSITIVE:
GetEmitter()->emitIns_R_R(INS_test, EA_SIZE(desc.CheckSrcSize()), reg, reg);
genJumpToThrowHlpBlk(EJ_jl, SCK_OVERFLOW);
break;
#ifdef TARGET_64BIT
case GenIntCastDesc::CHECK_UINT_RANGE:
{
// We need to check if the value is not greater than 0xFFFFFFFF but this value
// cannot be encoded in an immediate operand. Use a right shift to test if the
// upper 32 bits are zero. This requires a temporary register.
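// As a sketch (the temp register is picked by LSRA; rcx below is illustrative):
//   mov rcx, rax
//   shr rcx, 32
//   jne <overflow throw block>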
const regNumber tempReg = cast->GetSingleTempReg();
assert(tempReg != reg);
GetEmitter()->emitIns_Mov(INS_mov, EA_8BYTE, tempReg, reg, /* canSkip */ false);
GetEmitter()->emitIns_R_I(INS_shr_N, EA_8BYTE, tempReg, 32);
genJumpToThrowHlpBlk(EJ_jne, SCK_OVERFLOW);
}
break;
case GenIntCastDesc::CHECK_POSITIVE_INT_RANGE:
GetEmitter()->emitIns_R_I(INS_cmp, EA_8BYTE, reg, INT32_MAX);
genJumpToThrowHlpBlk(EJ_ja, SCK_OVERFLOW);
break;
case GenIntCastDesc::CHECK_INT_RANGE:
GetEmitter()->emitIns_R_I(INS_cmp, EA_8BYTE, reg, INT32_MAX);
genJumpToThrowHlpBlk(EJ_jg, SCK_OVERFLOW);
GetEmitter()->emitIns_R_I(INS_cmp, EA_8BYTE, reg, INT32_MIN);
genJumpToThrowHlpBlk(EJ_jl, SCK_OVERFLOW);
break;
#endif
default:
{
assert(desc.CheckKind() == GenIntCastDesc::CHECK_SMALL_INT_RANGE);
const int castMaxValue = desc.CheckSmallIntMax();
const int castMinValue = desc.CheckSmallIntMin();
GetEmitter()->emitIns_R_I(INS_cmp, EA_SIZE(desc.CheckSrcSize()), reg, castMaxValue);
genJumpToThrowHlpBlk((castMinValue == 0) ? EJ_ja : EJ_jg, SCK_OVERFLOW);
if (castMinValue != 0)
{
GetEmitter()->emitIns_R_I(INS_cmp, EA_SIZE(desc.CheckSrcSize()), reg, castMinValue);
genJumpToThrowHlpBlk(EJ_jl, SCK_OVERFLOW);
}
}
break;
}
}
//------------------------------------------------------------------------
// genIntToIntCast: Generate code for an integer cast, with or without overflow check.
//
// Arguments:
// cast - The GT_CAST node
//
// Assumptions:
// The cast node is not a contained node and must have an assigned register.
// Neither the source nor target type can be a floating point type.
// On x86 casts to (U)BYTE require that the source be in a byte register.
//
// TODO-XArch-CQ: Allow castOp to be a contained node without an assigned register.
//
void CodeGen::genIntToIntCast(GenTreeCast* cast)
{
genConsumeRegs(cast->gtGetOp1());
const regNumber srcReg = cast->gtGetOp1()->GetRegNum();
const regNumber dstReg = cast->GetRegNum();
emitter* emit = GetEmitter();
assert(genIsValidIntReg(srcReg));
assert(genIsValidIntReg(dstReg));
GenIntCastDesc desc(cast);
if (desc.CheckKind() != GenIntCastDesc::CHECK_NONE)
{
genIntCastOverflowCheck(cast, desc, srcReg);
}
instruction ins;
unsigned insSize;
bool canSkip = false;
switch (desc.ExtendKind())
{
case GenIntCastDesc::ZERO_EXTEND_SMALL_INT:
ins = INS_movzx;
insSize = desc.ExtendSrcSize();
break;
case GenIntCastDesc::SIGN_EXTEND_SMALL_INT:
ins = INS_movsx;
insSize = desc.ExtendSrcSize();
break;
#ifdef TARGET_64BIT
case GenIntCastDesc::ZERO_EXTEND_INT:
ins = INS_mov;
insSize = 4;
canSkip = compiler->opts.OptimizationEnabled() && emit->AreUpper32BitsZero(srcReg);
break;
case GenIntCastDesc::SIGN_EXTEND_INT:
ins = INS_movsxd;
insSize = 4;
break;
#endif
default:
assert(desc.ExtendKind() == GenIntCastDesc::COPY);
ins = INS_mov;
insSize = desc.ExtendSrcSize();
canSkip = true;
break;
}
emit->emitIns_Mov(ins, EA_ATTR(insSize), dstReg, srcReg, canSkip);
genProduceReg(cast);
}
//------------------------------------------------------------------------
// genFloatToFloatCast: Generate code for a cast between float and double
//
// Arguments:
// treeNode - The GT_CAST node
//
// Return Value:
// None.
//
// Assumptions:
// Cast is a non-overflow conversion.
// The treeNode must have an assigned register.
// The cast is between float and double or vice versa.
//
void CodeGen::genFloatToFloatCast(GenTree* treeNode)
{
// float <--> double conversions are always non-overflow ones
assert(treeNode->OperGet() == GT_CAST);
assert(!treeNode->gtOverflow());
regNumber targetReg = treeNode->GetRegNum();
assert(genIsValidFloatReg(targetReg));
GenTree* op1 = treeNode->AsOp()->gtOp1;
#ifdef DEBUG
// If not contained, must be a valid float reg.
if (op1->isUsedFromReg())
{
assert(genIsValidFloatReg(op1->GetRegNum()));
}
#endif
var_types dstType = treeNode->CastToType();
var_types srcType = op1->TypeGet();
assert(varTypeIsFloating(srcType) && varTypeIsFloating(dstType));
genConsumeOperands(treeNode->AsOp());
if (srcType == dstType && (op1->isUsedFromReg() && (targetReg == op1->GetRegNum())))
{
// source and destination types are the same and also reside in the same register.
// we just need to consume and produce the reg in this case.
;
}
else
{
instruction ins = ins_FloatConv(dstType, srcType);
GetEmitter()->emitInsBinary(ins, emitTypeSize(dstType), treeNode, op1);
}
genProduceReg(treeNode);
}
//------------------------------------------------------------------------
// genIntToFloatCast: Generate code to cast an int/long to float/double
//
// Arguments:
// treeNode - The GT_CAST node
//
// Return Value:
// None.
//
// Assumptions:
// Cast is a non-overflow conversion.
// The treeNode must have an assigned register.
// SrcType= int32/uint32/int64/uint64 and DstType=float/double.
//
void CodeGen::genIntToFloatCast(GenTree* treeNode)
{
// int type --> float/double conversions are always non-overflow ones
assert(treeNode->OperGet() == GT_CAST);
assert(!treeNode->gtOverflow());
regNumber targetReg = treeNode->GetRegNum();
assert(genIsValidFloatReg(targetReg));
GenTree* op1 = treeNode->AsOp()->gtOp1;
#ifdef DEBUG
if (op1->isUsedFromReg())
{
assert(genIsValidIntReg(op1->GetRegNum()));
}
#endif
var_types dstType = treeNode->CastToType();
var_types srcType = op1->TypeGet();
assert(!varTypeIsFloating(srcType) && varTypeIsFloating(dstType));
#if !defined(TARGET_64BIT)
// We expect morph to replace long to float/double casts with helper calls
noway_assert(!varTypeIsLong(srcType));
#endif // !defined(TARGET_64BIT)
// Since xarch emitter doesn't handle reporting gc-info correctly while casting away gc-ness we
// ensure srcType of a cast is non gc-type. Codegen should never see BYREF as source type except
// for GT_LCL_VAR_ADDR and GT_LCL_FLD_ADDR that represent stack addresses and can be considered
// as TYP_I_IMPL. In all other cases where src operand is a gc-type and not known to be on stack,
// Front-end (see fgMorphCast()) ensures this by assigning gc-type local to a non gc-type
// temp and using temp as operand of cast operation.
if (srcType == TYP_BYREF)
{
noway_assert(op1->OperGet() == GT_LCL_VAR_ADDR || op1->OperGet() == GT_LCL_FLD_ADDR);
srcType = TYP_I_IMPL;
}
// force the srcType to unsigned if GT_UNSIGNED flag is set
if (treeNode->gtFlags & GTF_UNSIGNED)
{
srcType = varTypeToUnsigned(srcType);
}
noway_assert(!varTypeIsGC(srcType));
// We should never be seeing srcType whose size is not sizeof(int) nor sizeof(long).
// For conversions from byte/sbyte/int16/uint16 to float/double, we would expect
// either the front-end or lowering phase to have generated two levels of cast.
// The first one is for widening smaller int type to int32 and the second one is
// to the float/double.
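// For example (purely a source-level illustration), "(double)someInt16" reaches codegen
// as CAST(int <- short) feeding CAST(double <- int), so only int/long sources are seen here.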
emitAttr srcSize = EA_ATTR(genTypeSize(srcType));
noway_assert((srcSize == EA_ATTR(genTypeSize(TYP_INT))) || (srcSize == EA_ATTR(genTypeSize(TYP_LONG))));
// Also we don't expect to see uint32 -> float/double and uint64 -> float conversions
// here since they should have been lowered appropriately.
noway_assert(srcType != TYP_UINT);
noway_assert((srcType != TYP_ULONG) || (dstType != TYP_FLOAT));
// To convert int to a float/double, cvtsi2ss/sd SSE2 instruction is used
// which does a partial write to lower 4/8 bytes of xmm register keeping the other
// upper bytes unmodified. If "cvtsi2ss/sd xmmReg, r32/r64" occurs inside a loop,
// the partial write could introduce a false dependency and could cause a stall
// if there are further uses of xmmReg. We have such a case occurring with a
// customer reported version of SpectralNorm benchmark, resulting in 2x perf
// regression. To avoid false dependency, we emit "xorps xmmReg, xmmReg" before
// cvtsi2ss/sd instruction.
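// For example, an int64 -> double conversion is emitted roughly as (illustrative regs):
//   xorps    xmm0, xmm0    ; break the false dependency on xmm0
//   cvtsi2sd xmm0, rax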
genConsumeOperands(treeNode->AsOp());
GetEmitter()->emitIns_R_R(INS_xorps, EA_4BYTE, treeNode->GetRegNum(), treeNode->GetRegNum());
// Note that here we need to specify srcType that will determine
// the size of source reg/mem operand and rex.w prefix.
instruction ins = ins_FloatConv(dstType, TYP_INT);
GetEmitter()->emitInsBinary(ins, emitTypeSize(srcType), treeNode, op1);
// Handle the case of srcType = TYP_ULONG. SSE2 conversion instruction
// will interpret ULONG value as LONG. Hence we need to adjust the
// result if sign-bit of srcType is set.
if (srcType == TYP_ULONG)
{
// The instruction sequence below is less accurate than what clang
// and gcc generate. However, we keep the current sequence for backward compatibility.
// If we change the instructions below, FloatingPointUtils::convertUInt64ToDouble
// should be also updated for consistent conversion result.
assert(dstType == TYP_DOUBLE);
assert(op1->isUsedFromReg());
// Set the flags without modifying op1.
// test op1Reg, op1Reg
inst_RV_RV(INS_test, op1->GetRegNum(), op1->GetRegNum(), srcType);
// No need to adjust result if op1 >= 0 i.e. positive
// Jge label
BasicBlock* label = genCreateTempLabel();
inst_JMP(EJ_jge, label);
// Adjust the result
// result = result + 0x43f00000 00000000
// addsd resultReg, 0x43f00000 00000000
CORINFO_FIELD_HANDLE* cns = &u8ToDblBitmask;
if (*cns == nullptr)
{
double d;
static_assert_no_msg(sizeof(double) == sizeof(__int64));
*((__int64*)&d) = 0x43f0000000000000LL;
*cns = GetEmitter()->emitFltOrDblConst(d, EA_8BYTE);
}
GetEmitter()->emitIns_R_C(INS_addsd, EA_8BYTE, treeNode->GetRegNum(), *cns, 0);
genDefineTempLabel(label);
}
genProduceReg(treeNode);
}
//------------------------------------------------------------------------
// genFloatToIntCast: Generate code to cast float/double to int/long
//
// Arguments:
// treeNode - The GT_CAST node
//
// Return Value:
// None.
//
// Assumptions:
// Cast is a non-overflow conversion.
// The treeNode must have an assigned register.
// SrcType=float/double and DstType= int32/uint32/int64/uint64
//
// TODO-XArch-CQ: (Low-pri) - generate in-line code when DstType = uint64
//
void CodeGen::genFloatToIntCast(GenTree* treeNode)
{
// we don't expect to see overflow detecting float/double --> int type conversions here
// as they should have been converted into helper calls by front-end.
assert(treeNode->OperGet() == GT_CAST);
assert(!treeNode->gtOverflow());
regNumber targetReg = treeNode->GetRegNum();
assert(genIsValidIntReg(targetReg));
GenTree* op1 = treeNode->AsOp()->gtOp1;
#ifdef DEBUG
if (op1->isUsedFromReg())
{
assert(genIsValidFloatReg(op1->GetRegNum()));
}
#endif
var_types dstType = treeNode->CastToType();
var_types srcType = op1->TypeGet();
assert(varTypeIsFloating(srcType) && !varTypeIsFloating(dstType));
// We should never be seeing dstType whose size is neither sizeof(TYP_INT) nor sizeof(TYP_LONG).
// For conversions to byte/sbyte/int16/uint16 from float/double, we would expect the
// front-end or lowering phase to have generated two levels of cast. The first one is
// for float or double to int32/uint32 and the second one for narrowing int32/uint32 to
// the required smaller int type.
emitAttr dstSize = EA_ATTR(genTypeSize(dstType));
noway_assert((dstSize == EA_ATTR(genTypeSize(TYP_INT))) || (dstSize == EA_ATTR(genTypeSize(TYP_LONG))));
// We shouldn't be seeing uint64 here as it should have been converted
// into a helper call by either front-end or lowering phase.
noway_assert(!varTypeIsUnsigned(dstType) || (dstSize != EA_ATTR(genTypeSize(TYP_LONG))));
// If the dstType is TYP_UINT, all 32 bits of the destination are needed to encode the
// value, so the sign bit can only live in bit 33 or above.
// To achieve this we pretend we are converting to a long.
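// (Illustration, registers hypothetical: a double -> uint32 conversion then comes out
//  roughly as "cvttsd2si rax, xmm0", i.e. a truncating convert into a 64-bit register.)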
if (varTypeIsUnsigned(dstType) && (dstSize == EA_ATTR(genTypeSize(TYP_INT))))
{
dstType = TYP_LONG;
}
// Note that we need to specify dstType here so that it will determine
// the size of destination integer register and also the rex.w prefix.
genConsumeOperands(treeNode->AsOp());
instruction ins = ins_FloatConv(TYP_INT, srcType);
GetEmitter()->emitInsBinary(ins, emitTypeSize(dstType), treeNode, op1);
genProduceReg(treeNode);
}
//------------------------------------------------------------------------
// genCkfinite: Generate code for ckfinite opcode.
//
// Arguments:
// treeNode - The GT_CKFINITE node
//
// Return Value:
// None.
//
// Assumptions:
// GT_CKFINITE node has reserved an internal register.
//
// TODO-XArch-CQ - mark the operand as contained if known to be in
// memory (e.g. field or an array element).
//
void CodeGen::genCkfinite(GenTree* treeNode)
{
assert(treeNode->OperGet() == GT_CKFINITE);
GenTree* op1 = treeNode->AsOp()->gtOp1;
var_types targetType = treeNode->TypeGet();
int expMask = (targetType == TYP_FLOAT) ? 0x7F800000 : 0x7FF00000; // Bit mask to extract exponent.
regNumber targetReg = treeNode->GetRegNum();
// Extract exponent into a register.
regNumber tmpReg = treeNode->GetSingleTempReg();
genConsumeReg(op1);
#ifdef TARGET_64BIT
// Copy the floating-point value to an integer register. If we copied a double into a long
// register, then right-shift the value by 32 so the high 32 bits of the floating-point value
// sit in the low 32 bits of the integer register.
regNumber srcReg = op1->GetRegNum();
var_types targetIntType = ((targetType == TYP_FLOAT) ? TYP_INT : TYP_LONG);
inst_Mov(targetIntType, tmpReg, srcReg, /* canSkip */ false, emitActualTypeSize(targetType));
if (targetType == TYP_DOUBLE)
{
// right shift by 32 bits to get to exponent.
inst_RV_SH(INS_shr, EA_8BYTE, tmpReg, 32);
}
// Mask exponent with all 1's and check if the exponent is all 1's
inst_RV_IV(INS_and, tmpReg, expMask, EA_4BYTE);
inst_RV_IV(INS_cmp, tmpReg, expMask, EA_4BYTE);
// If exponent is all 1's, throw ArithmeticException
genJumpToThrowHlpBlk(EJ_je, SCK_ARITH_EXCPN);
// if it is a finite value copy it to targetReg
inst_Mov(targetType, targetReg, op1->GetRegNum(), /* canSkip */ true);
#else // !TARGET_64BIT
// If the target type is TYP_DOUBLE, we want to extract the high 32 bits into the register.
// There is no easy way to do this. To not require an extra register, we'll use shuffles
// to move the high 32 bits into the low 32 bits, then shuffle it back, since we
// need to produce the value into the target register.
//
// For TYP_DOUBLE, we'll generate (for targetReg != op1->GetRegNum()):
// movaps targetReg, op1->GetRegNum()
// shufps targetReg, targetReg, 0xB1 // WZYX => ZWXY
// mov_xmm2i tmpReg, targetReg // tmpReg <= Y
// and tmpReg, <mask>
// cmp tmpReg, <mask>
// je <throw block>
// movaps targetReg, op1->GetRegNum() // copy the value again, instead of un-shuffling it
//
// For TYP_DOUBLE with (targetReg == op1->GetRegNum()):
// shufps targetReg, targetReg, 0xB1 // WZYX => ZWXY
// mov_xmm2i tmpReg, targetReg // tmpReg <= Y
// and tmpReg, <mask>
// cmp tmpReg, <mask>
// je <throw block>
// shufps targetReg, targetReg, 0xB1 // ZWXY => WZYX
//
// For TYP_FLOAT, it's the same as TARGET_64BIT:
// mov_xmm2i tmpReg, targetReg // tmpReg <= low 32 bits
// and tmpReg, <mask>
// cmp tmpReg, <mask>
// je <throw block>
// movaps targetReg, op1->GetRegNum() // only if targetReg != op1->GetRegNum()
regNumber copyToTmpSrcReg; // The register we'll copy to the integer temp.
if (targetType == TYP_DOUBLE)
{
inst_Mov(targetType, targetReg, op1->GetRegNum(), /* canSkip */ true);
inst_RV_RV_IV(INS_shufps, EA_16BYTE, targetReg, targetReg, (int8_t)0xb1);
copyToTmpSrcReg = targetReg;
}
else
{
copyToTmpSrcReg = op1->GetRegNum();
}
// Copy only the low 32 bits. This will be the high order 32 bits of the floating-point
// value, no matter the floating-point type.
inst_Mov(TYP_INT, tmpReg, copyToTmpSrcReg, /* canSkip */ false, emitActualTypeSize(TYP_FLOAT));
// Mask exponent with all 1's and check if the exponent is all 1's
inst_RV_IV(INS_and, tmpReg, expMask, EA_4BYTE);
inst_RV_IV(INS_cmp, tmpReg, expMask, EA_4BYTE);
// If exponent is all 1's, throw ArithmeticException
genJumpToThrowHlpBlk(EJ_je, SCK_ARITH_EXCPN);
if ((targetType == TYP_DOUBLE) && (targetReg == op1->GetRegNum()))
{
// We need to re-shuffle the targetReg to get the correct result.
inst_RV_RV_IV(INS_shufps, EA_16BYTE, targetReg, targetReg, (int8_t)0xb1);
}
else
{
// In both the TYP_FLOAT and TYP_DOUBLE case, the op1 register is untouched,
// so copy it to the targetReg. This is faster and smaller for TYP_DOUBLE
// than re-shuffling the targetReg.
inst_Mov(targetType, targetReg, op1->GetRegNum(), /* canSkip */ true);
}
#endif // !TARGET_64BIT
genProduceReg(treeNode);
}
#ifdef TARGET_AMD64
int CodeGenInterface::genSPtoFPdelta() const
{
int delta;
#ifdef UNIX_AMD64_ABI
// We require frame chaining on Unix to support native tool unwinding (such as
// unwinding by the native debugger). We have a CLR-only extension to the
// unwind codes (UWOP_SET_FPREG_LARGE) to support SP->FP offsets larger than 240.
// If Unix ever supports EnC, the RSP == RBP assumption will have to be reevaluated.
delta = genTotalFrameSize();
#else // !UNIX_AMD64_ABI
// As per Amd64 ABI, RBP offset from initial RSP can be between 0 and 240 if
// RBP needs to be reported in unwind codes. This case would arise for methods
// with localloc.
if (compiler->compLocallocUsed)
{
// We cannot base delta computation on compLclFrameSize since it changes from
// tentative to final frame layout and hence there is a possibility of
// under-estimating offset of vars from FP, which in turn results in under-
// estimating instruction size.
//
// To be predictable, and so as to never under-estimate the offset of vars from FP,
// we will always position FP at min(240, outgoing arg area size).
delta = Min(240, (int)compiler->lvaOutgoingArgSpaceSize);
}
else if (compiler->opts.compDbgEnC)
{
// vm assumption on EnC methods is that rsp and rbp are equal
delta = 0;
}
else
{
delta = genTotalFrameSize();
}
#endif // !UNIX_AMD64_ABI
return delta;
}
//---------------------------------------------------------------------
// genTotalFrameSize - return the total size of the stack frame, including local size,
// callee-saved register size, etc. For AMD64, this does not include the caller-pushed
// return address.
//
// Return value:
// Total frame size
//
int CodeGenInterface::genTotalFrameSize() const
{
assert(!IsUninitialized(compiler->compCalleeRegsPushed));
int totalFrameSize = compiler->compCalleeRegsPushed * REGSIZE_BYTES + compiler->compLclFrameSize;
assert(totalFrameSize >= 0);
return totalFrameSize;
}
//---------------------------------------------------------------------
// genCallerSPtoFPdelta - return the offset from Caller-SP to the frame pointer.
// This number is going to be negative, since the Caller-SP is at a higher
// address than the frame pointer.
//
// There must be a frame pointer to call this function!
//
// We can't compute this directly from the Caller-SP, since the frame pointer
// is based on a maximum delta from Initial-SP, so first we find SP, then
// compute the FP offset.
int CodeGenInterface::genCallerSPtoFPdelta() const
{
assert(isFramePointerUsed());
int callerSPtoFPdelta;
callerSPtoFPdelta = genCallerSPtoInitialSPdelta() + genSPtoFPdelta();
assert(callerSPtoFPdelta <= 0);
return callerSPtoFPdelta;
}
//---------------------------------------------------------------------
// genCallerSPtoInitialSPdelta - return the offset from Caller-SP to Initial SP.
//
// This number will be negative.
int CodeGenInterface::genCallerSPtoInitialSPdelta() const
{
int callerSPtoSPdelta = 0;
callerSPtoSPdelta -= genTotalFrameSize();
callerSPtoSPdelta -= REGSIZE_BYTES; // caller-pushed return address
// compCalleeRegsPushed does not account for the frame pointer
// TODO-Cleanup: shouldn't this be part of genTotalFrameSize?
if (isFramePointerUsed())
{
callerSPtoSPdelta -= REGSIZE_BYTES;
}
assert(callerSPtoSPdelta <= 0);
return callerSPtoSPdelta;
}
#endif // TARGET_AMD64
//-----------------------------------------------------------------------------------------
// genSSE2BitwiseOp - generate SSE2 code for the given oper as "Operand BitWiseOp BitMask"
//
// Arguments:
// treeNode - tree node
//
// Return value:
// None
//
// Assumptions:
// i) tree oper is one of GT_NEG or GT_INTRINSIC Abs()
// ii) tree type is floating point type.
// iii) caller of this routine needs to call genProduceReg()
void CodeGen::genSSE2BitwiseOp(GenTree* treeNode)
{
regNumber targetReg = treeNode->GetRegNum();
regNumber operandReg = genConsumeReg(treeNode->gtGetOp1());
emitAttr size = emitTypeSize(treeNode);
assert(varTypeIsFloating(treeNode->TypeGet()));
assert(treeNode->gtGetOp1()->isUsedFromReg());
CORINFO_FIELD_HANDLE* maskFld = nullptr;
UINT64 mask = 0;
instruction ins = INS_invalid;
if (treeNode->OperIs(GT_NEG))
{
// Neg(x) = flip the sign bit.
// Neg(f) = f ^ 0x80000000 x4 (packed)
// Neg(d) = d ^ 0x8000000000000000 x2 (packed)
ins = INS_xorps;
mask = treeNode->TypeIs(TYP_FLOAT) ? 0x8000000080000000UL : 0x8000000000000000UL;
maskFld = treeNode->TypeIs(TYP_FLOAT) ? &negBitmaskFlt : &negBitmaskDbl;
}
else if (treeNode->OperIs(GT_INTRINSIC))
{
assert(treeNode->AsIntrinsic()->gtIntrinsicName == NI_System_Math_Abs);
// Abs(x) = set sign-bit to zero
// Abs(f) = f & 0x7fffffff x4 (packed)
// Abs(d) = d & 0x7fffffffffffffff x2 (packed)
ins = INS_andps;
mask = treeNode->TypeIs(TYP_FLOAT) ? 0x7fffffff7fffffffUL : 0x7fffffffffffffffUL;
maskFld = treeNode->TypeIs(TYP_FLOAT) ? &absBitmaskFlt : &absBitmaskDbl;
}
else
{
assert(!"genSSE2BitwiseOp: unsupported oper");
}
if (*maskFld == nullptr)
{
UINT64 maskPack[] = {mask, mask};
*maskFld = GetEmitter()->emitBlkConst(&maskPack, 16, 16, treeNode->TypeGet());
}
GetEmitter()->emitIns_SIMD_R_R_C(ins, size, targetReg, operandReg, *maskFld, 0);
}
//-----------------------------------------------------------------------------------------
// genSSE41RoundOp - generate SSE41 code for the given tree as a round operation
//
// Arguments:
// treeNode - tree node
//
// Return value:
// None
//
// Assumptions:
// i) SSE4.1 is supported by the underlying hardware
// ii) treeNode oper is a GT_INTRINSIC
// iii) treeNode type is a floating point type
// iv) treeNode is not used from memory
// v) tree oper is NI_System_Math{F}_Round, _Ceiling, _Floor, or _Truncate
// vi) caller of this routine needs to call genProduceReg()
void CodeGen::genSSE41RoundOp(GenTreeOp* treeNode)
{
// i) SSE4.1 is supported by the underlying hardware
assert(compiler->compIsaSupportedDebugOnly(InstructionSet_SSE41));
// ii) treeNode oper is a GT_INTRINSIC
assert(treeNode->OperGet() == GT_INTRINSIC);
GenTree* srcNode = treeNode->gtGetOp1();
// iii) treeNode type is floating point type
assert(varTypeIsFloating(srcNode));
assert(srcNode->TypeGet() == treeNode->TypeGet());
// iv) treeNode is not used from memory
assert(!treeNode->isUsedFromMemory());
genConsumeOperands(treeNode);
instruction ins = (treeNode->TypeGet() == TYP_FLOAT) ? INS_roundss : INS_roundsd;
emitAttr size = emitTypeSize(treeNode);
regNumber dstReg = treeNode->GetRegNum();
unsigned ival = 0;
// v) tree oper is NI_System_Math{F}_Round, _Ceiling, _Floor, or _Truncate
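// Note on the immediate below: it is the SSE4.1 roundss/roundsd control byte, where
// imm8[1:0] selects the rounding mode, imm8[2]=1 defers to MXCSR.RC instead, and
// imm8[3]=1 suppresses the precision exception. Hence 9 = floor, 10 = ceiling,
// 11 = truncate, and 4 defers to MXCSR (round-to-nearest-even by default).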
switch (treeNode->AsIntrinsic()->gtIntrinsicName)
{
case NI_System_Math_Round:
ival = 4;
break;
case NI_System_Math_Ceiling:
ival = 10;
break;
case NI_System_Math_Floor:
ival = 9;
break;
case NI_System_Math_Truncate:
ival = 11;
break;
default:
ins = INS_invalid;
assert(!"genSSE41RoundOp: unsupported intrinsic");
unreached();
}
if (srcNode->isContained() || srcNode->isUsedFromSpillTemp())
{
emitter* emit = GetEmitter();
TempDsc* tmpDsc = nullptr;
unsigned varNum = BAD_VAR_NUM;
unsigned offset = (unsigned)-1;
if (srcNode->isUsedFromSpillTemp())
{
assert(srcNode->IsRegOptional());
tmpDsc = getSpillTempDsc(srcNode);
varNum = tmpDsc->tdTempNum();
offset = 0;
regSet.tmpRlsTemp(tmpDsc);
}
else if (srcNode->isIndir())
{
GenTreeIndir* memIndir = srcNode->AsIndir();
GenTree* memBase = memIndir->gtOp1;
switch (memBase->OperGet())
{
case GT_LCL_VAR_ADDR:
case GT_LCL_FLD_ADDR:
{
assert(memBase->isContained());
varNum = memBase->AsLclVarCommon()->GetLclNum();
offset = memBase->AsLclVarCommon()->GetLclOffs();
// Ensure that all the GenTreeIndir values are set to their defaults.
assert(memBase->GetRegNum() == REG_NA);
assert(!memIndir->HasIndex());
assert(memIndir->Scale() == 1);
assert(memIndir->Offset() == 0);
break;
}
case GT_CLS_VAR_ADDR:
{
emit->emitIns_R_C_I(ins, size, dstReg, memBase->AsClsVar()->gtClsVarHnd, 0, ival);
return;
}
default:
{
emit->emitIns_R_A_I(ins, size, dstReg, memIndir, ival);
return;
}
}
}
else
{
switch (srcNode->OperGet())
{
case GT_CNS_DBL:
{
GenTreeDblCon* dblConst = srcNode->AsDblCon();
CORINFO_FIELD_HANDLE hnd = emit->emitFltOrDblConst(dblConst->gtDconVal, emitTypeSize(dblConst));
emit->emitIns_R_C_I(ins, size, dstReg, hnd, 0, ival);
return;
}
case GT_LCL_FLD:
varNum = srcNode->AsLclFld()->GetLclNum();
offset = srcNode->AsLclFld()->GetLclOffs();
break;
case GT_LCL_VAR:
{
assert(srcNode->IsRegOptional() || !compiler->lvaGetDesc(srcNode->AsLclVar())->lvIsRegCandidate());
varNum = srcNode->AsLclVar()->GetLclNum();
offset = 0;
break;
}
default:
unreached();
break;
}
}
// Ensure we got a good varNum and offset.
// We also need to check for `tmpDsc != nullptr` since spill temp numbers
// are negative and start with -1, which also happens to be BAD_VAR_NUM.
assert((varNum != BAD_VAR_NUM) || (tmpDsc != nullptr));
assert(offset != (unsigned)-1);
emit->emitIns_R_S_I(ins, size, dstReg, varNum, offset, ival);
}
else
{
inst_RV_RV_IV(ins, size, dstReg, srcNode->GetRegNum(), ival);
}
}
//---------------------------------------------------------------------
// genIntrinsic - generate code for a given intrinsic
//
// Arguments
// treeNode - the GT_INTRINSIC node
//
// Return value:
// None
//
void CodeGen::genIntrinsic(GenTree* treeNode)
{
// Handle intrinsics that can be implemented by target-specific instructions
switch (treeNode->AsIntrinsic()->gtIntrinsicName)
{
case NI_System_Math_Abs:
genSSE2BitwiseOp(treeNode);
break;
case NI_System_Math_Ceiling:
case NI_System_Math_Floor:
case NI_System_Math_Truncate:
case NI_System_Math_Round:
genSSE41RoundOp(treeNode->AsOp());
break;
case NI_System_Math_Sqrt:
{
// Both operand and its result must be of the same floating point type.
GenTree* srcNode = treeNode->AsOp()->gtOp1;
assert(varTypeIsFloating(srcNode));
assert(srcNode->TypeGet() == treeNode->TypeGet());
genConsumeOperands(treeNode->AsOp());
const instruction ins = (treeNode->TypeGet() == TYP_FLOAT) ? INS_sqrtss : INS_sqrtsd;
GetEmitter()->emitInsBinary(ins, emitTypeSize(treeNode), treeNode, srcNode);
break;
}
default:
assert(!"genIntrinsic: Unsupported intrinsic");
unreached();
}
genProduceReg(treeNode);
}
//-------------------------------------------------------------------------- //
// getBaseVarForPutArgStk - returns the baseVarNum for passing a stack arg.
//
// Arguments
// treeNode - the GT_PUTARG_STK node
//
// Return value:
// The number of the base variable.
//
// Note:
// If tail call the outgoing args are placed in the caller's incoming arg stack space.
// Otherwise, they go in the outgoing arg area on the current frame.
//
// On Windows the caller always creates slots (homing space) in its frame for the
// first 4 arguments of a callee (register passed args). So, the baseVarNum is always 0.
// For System V systems there is no such calling convention requirement, and the code needs to find
// the first stack passed argument from the caller. This is done by iterating over
// all the lvParam variables and finding the first with GetArgReg() equals to REG_STK.
//
unsigned CodeGen::getBaseVarForPutArgStk(GenTree* treeNode)
{
assert(treeNode->OperGet() == GT_PUTARG_STK);
unsigned baseVarNum;
// Whether to set up the stk arg in the incoming or out-going arg area?
// Fast tail calls implemented as epilog+jmp: the stk arg is set up in the incoming arg area.
// All other calls: the stk arg is set up in the out-going arg area.
if (treeNode->AsPutArgStk()->putInIncomingArgArea())
{
// See the note in the function header re: finding the first stack passed argument.
baseVarNum = getFirstArgWithStackSlot();
assert(baseVarNum != BAD_VAR_NUM);
#ifdef DEBUG
// This must be a fast tail call.
assert(treeNode->AsPutArgStk()->gtCall->AsCall()->IsFastTailCall());
// Since it is a fast tail call, the existence of first incoming arg is guaranteed
// because fast tail call requires that in-coming arg area of caller is >= out-going
// arg area required for tail call.
LclVarDsc* varDsc = compiler->lvaGetDesc(baseVarNum);
assert(varDsc != nullptr);
#ifdef UNIX_AMD64_ABI
assert(!varDsc->lvIsRegArg && varDsc->GetArgReg() == REG_STK);
#else // !UNIX_AMD64_ABI
// On Windows this assert is always true. The first argument will always be in REG_ARG_0 or REG_FLTARG_0.
assert(varDsc->lvIsRegArg && (varDsc->GetArgReg() == REG_ARG_0 || varDsc->GetArgReg() == REG_FLTARG_0));
#endif // !UNIX_AMD64_ABI
#endif // DEBUG
}
else
{
#if FEATURE_FIXED_OUT_ARGS
baseVarNum = compiler->lvaOutgoingArgSpaceVar;
#else // !FEATURE_FIXED_OUT_ARGS
assert(!"No BaseVarForPutArgStk on x86");
baseVarNum = BAD_VAR_NUM;
#endif // !FEATURE_FIXED_OUT_ARGS
}
return baseVarNum;
}
//---------------------------------------------------------------------
// genAlignStackBeforeCall: Align the stack if necessary before a call.
//
// Arguments:
// putArgStk - the putArgStk node.
//
void CodeGen::genAlignStackBeforeCall(GenTreePutArgStk* putArgStk)
{
#if defined(UNIX_X86_ABI)
genAlignStackBeforeCall(putArgStk->gtCall);
#endif // UNIX_X86_ABI
}
//---------------------------------------------------------------------
// genAlignStackBeforeCall: Align the stack if necessary before a call.
//
// Arguments:
// call - the call node.
//
void CodeGen::genAlignStackBeforeCall(GenTreeCall* call)
{
#if defined(UNIX_X86_ABI)
// Have we aligned the stack yet?
if (!call->fgArgInfo->IsStkAlignmentDone())
{
// We haven't done any stack alignment yet for this call. We might need to create
// an alignment adjustment, even if this function itself doesn't have any stack args.
// This can happen if this function call is part of a nested call sequence, and the outer
// call has already pushed some arguments.
unsigned stkLevel = genStackLevel + call->fgArgInfo->GetStkSizeBytes();
call->fgArgInfo->ComputeStackAlignment(stkLevel);
unsigned padStkAlign = call->fgArgInfo->GetStkAlign();
if (padStkAlign != 0)
{
// Now generate the alignment
inst_RV_IV(INS_sub, REG_SPBASE, padStkAlign, EA_PTRSIZE);
AddStackLevel(padStkAlign);
AddNestedAlignment(padStkAlign);
}
call->fgArgInfo->SetStkAlignmentDone();
}
#endif // UNIX_X86_ABI
}
//---------------------------------------------------------------------
// genRemoveAlignmentAfterCall: After a call, remove the alignment
// added before the call, if any.
//
// Arguments:
// call - the call node.
// bias - additional stack adjustment
//
// Note:
// When bias > 0, caller should adjust stack level appropriately as
// bias is not considered when adjusting stack level.
//
void CodeGen::genRemoveAlignmentAfterCall(GenTreeCall* call, unsigned bias)
{
#if defined(TARGET_X86)
#if defined(UNIX_X86_ABI)
// Put back the stack pointer if there was any padding for stack alignment
unsigned padStkAlign = call->fgArgInfo->GetStkAlign();
unsigned padStkAdjust = padStkAlign + bias;
if (padStkAdjust != 0)
{
inst_RV_IV(INS_add, REG_SPBASE, padStkAdjust, EA_PTRSIZE);
SubtractStackLevel(padStkAlign);
SubtractNestedAlignment(padStkAlign);
}
#else // UNIX_X86_ABI
if (bias != 0)
{
if (bias == sizeof(int))
{
inst_RV(INS_pop, REG_ECX, TYP_INT);
}
else
{
inst_RV_IV(INS_add, REG_SPBASE, bias, EA_PTRSIZE);
}
}
#endif // !UNIX_X86_ABI
#else // TARGET_X86
assert(bias == 0);
#endif // !TARGET_X86
}
#ifdef TARGET_X86
//---------------------------------------------------------------------
// genAdjustStackForPutArgStk:
// adjust the stack pointer for a putArgStk node if necessary.
//
// Arguments:
// putArgStk - the putArgStk node.
//
// Returns: true if the stack pointer was adjusted; false otherwise.
//
// Notes:
// Sets `m_pushStkArg` to true if the stack arg needs to be pushed,
// false if the stack arg needs to be stored at the current stack
// pointer address. This is exactly the opposite of the return value
// of this function.
//
bool CodeGen::genAdjustStackForPutArgStk(GenTreePutArgStk* putArgStk)
{
const unsigned argSize = putArgStk->GetStackByteSize();
GenTree* source = putArgStk->gtGetOp1();
#ifdef FEATURE_SIMD
if (!source->OperIs(GT_FIELD_LIST) && varTypeIsSIMD(source))
{
inst_RV_IV(INS_sub, REG_SPBASE, argSize, EA_PTRSIZE);
AddStackLevel(argSize);
m_pushStkArg = false;
return true;
}
#endif // FEATURE_SIMD
#ifdef DEBUG
switch (putArgStk->gtPutArgStkKind)
{
case GenTreePutArgStk::Kind::RepInstr:
case GenTreePutArgStk::Kind::Unroll:
assert(!source->AsObj()->GetLayout()->HasGCPtr());
break;
case GenTreePutArgStk::Kind::Push:
case GenTreePutArgStk::Kind::PushAllSlots:
assert(source->OperIs(GT_FIELD_LIST) || source->AsObj()->GetLayout()->HasGCPtr() ||
(argSize < XMM_REGSIZE_BYTES));
break;
default:
unreached();
}
#endif // DEBUG
// In lowering (see "LowerPutArgStk") we have determined what sort of instructions
// are going to be used for this node. If we'll not be using "push"es, the stack
// needs to be adjusted first (s. t. the SP points to the base of the outgoing arg).
//
if (!putArgStk->isPushKind())
{
// If argSize is large, we need to probe the stack like we do in the prolog (genAllocLclFrame)
// or for localloc (genLclHeap), to ensure we touch the stack pages sequentially, and don't miss
// the stack guard pages. The prolog probes, but we don't know at this point how much higher
// the last probed stack pointer value is. We use a default threshold; for any size below
// this threshold we are guaranteed the stack has been probed. Above it, we don't know. The threshold
// should be high enough to cover all common cases. Increasing the threshold means adding a few
// more "lowest address of stack" probes in the prolog. Since this is relatively rare, add it to
// stress modes.
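// Illustration (the 32 below is a made-up size): a small arg simply gets
//   sub esp, 32
// while an arg at or above ARG_STACK_PROBE_THRESHOLD_BYTES goes through
// genStackPointerConstantAdjustmentLoopWithProbe so every new page is touched.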
if ((argSize >= ARG_STACK_PROBE_THRESHOLD_BYTES) ||
compiler->compStressCompile(Compiler::STRESS_GENERIC_VARN, 5))
{
genStackPointerConstantAdjustmentLoopWithProbe(-(ssize_t)argSize, REG_NA);
}
else
{
inst_RV_IV(INS_sub, REG_SPBASE, argSize, EA_PTRSIZE);
}
AddStackLevel(argSize);
m_pushStkArg = false;
return true;
}
// Otherwise, "push" will be adjusting the stack for us.
m_pushStkArg = true;
return false;
}
//---------------------------------------------------------------------
// genPutArgStkFieldList - generate code for passing a GT_FIELD_LIST arg on the stack.
//
// Arguments
// treeNode - the GT_PUTARG_STK node whose op1 is a GT_FIELD_LIST
//
// Return value:
// None
//
void CodeGen::genPutArgStkFieldList(GenTreePutArgStk* putArgStk)
{
GenTreeFieldList* const fieldList = putArgStk->gtOp1->AsFieldList();
assert(fieldList != nullptr);
// Set m_pushStkArg and pre-adjust the stack if necessary.
const bool preAdjustedStack = genAdjustStackForPutArgStk(putArgStk);
// For now, we only support the "push" case; we will push a full slot for the first field of each slot
// within the struct.
assert((putArgStk->isPushKind()) && !preAdjustedStack && m_pushStkArg);
// If we have pre-adjusted the stack and are simply storing the fields in order, set the offset to 0.
// (Note that this mode is not currently being used.)
// If we are pushing the arguments (i.e. we have not pre-adjusted the stack), then we are pushing them
// in reverse order, so we start with the current field offset at the size of the struct arg (which must be
// a multiple of the target pointer size).
unsigned currentOffset = (preAdjustedStack) ? 0 : putArgStk->GetStackByteSize();
unsigned prevFieldOffset = currentOffset;
regNumber intTmpReg = REG_NA;
regNumber simdTmpReg = REG_NA;
if (putArgStk->AvailableTempRegCount() != 0)
{
regMaskTP rsvdRegs = putArgStk->gtRsvdRegs;
if ((rsvdRegs & RBM_ALLINT) != 0)
{
intTmpReg = putArgStk->GetSingleTempReg(RBM_ALLINT);
assert(genIsValidIntReg(intTmpReg));
}
if ((rsvdRegs & RBM_ALLFLOAT) != 0)
{
simdTmpReg = putArgStk->GetSingleTempReg(RBM_ALLFLOAT);
assert(genIsValidFloatReg(simdTmpReg));
}
assert(genCountBits(rsvdRegs) == (unsigned)((intTmpReg == REG_NA) ? 0 : 1) + ((simdTmpReg == REG_NA) ? 0 : 1));
}
for (GenTreeFieldList::Use& use : fieldList->Uses())
{
GenTree* const fieldNode = use.GetNode();
const unsigned fieldOffset = use.GetOffset();
var_types fieldType = use.GetType();
// Long-typed nodes should have been handled by the decomposition pass, and lowering should have sorted the
// field list in descending order by offset.
assert(!varTypeIsLong(fieldType));
assert(fieldOffset <= prevFieldOffset);
// Consume the register, if any, for this field. Note that genConsumeRegs() will appropriately
// update the liveness info for a lclVar that has been marked RegOptional, which hasn't been
// assigned a register, and which is therefore contained.
// Unlike genConsumeReg(), it handles the case where no registers are being consumed.
genConsumeRegs(fieldNode);
regNumber argReg = fieldNode->isUsedFromSpillTemp() ? REG_NA : fieldNode->GetRegNum();
// If the field is slot-like, we can use a push instruction to store the entire register no matter the type.
//
// The GC encoder requires that the stack remain 4-byte aligned at all times. Round the adjustment up
// to the next multiple of 4. If we are going to generate a `push` instruction, the adjustment must
// not require rounding.
// NOTE: if the field is of GC type, we must use a push instruction, since the emitter is not otherwise
// able to detect stores into the outgoing argument area of the stack on x86.
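// For example (offsets are hypothetical): a TYP_REF field at offset 4, with the previously
// processed field at offset 8, satisfies both conditions and is emitted as a single "push",
// which also lets the emitter track the GC store on x86.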
const bool fieldIsSlot = ((fieldOffset % 4) == 0) && ((prevFieldOffset - fieldOffset) >= 4);
int adjustment = roundUp(currentOffset - fieldOffset, 4);
if (fieldIsSlot && !varTypeIsSIMD(fieldType))
{
fieldType = genActualType(fieldType);
unsigned pushSize = genTypeSize(fieldType);
assert((pushSize % 4) == 0);
adjustment -= pushSize;
while (adjustment != 0)
{
inst_IV(INS_push, 0);
currentOffset -= pushSize;
AddStackLevel(pushSize);
adjustment -= pushSize;
}
m_pushStkArg = true;
}
else
{
m_pushStkArg = false;
// We always "push" floating point fields (i.e. they are full slot values that don't
// require special handling).
assert(varTypeIsIntegralOrI(fieldNode) || varTypeIsSIMD(fieldNode));
// If we can't push this field, it needs to be in a register so that we can store
// it to the stack location.
if (adjustment != 0)
{
// This moves the stack pointer to fieldOffset.
// For this case, we must adjust the stack and generate stack-relative stores rather than pushes.
// Adjust the stack pointer to the next slot boundary.
inst_RV_IV(INS_sub, REG_SPBASE, adjustment, EA_PTRSIZE);
currentOffset -= adjustment;
AddStackLevel(adjustment);
}
// Does it need to be in a byte register?
// If so, we'll use intTmpReg, which must have been allocated as a byte register.
// If it's already in a register, but not a byteable one, then move it.
if (varTypeIsByte(fieldType) && ((argReg == REG_NA) || ((genRegMask(argReg) & RBM_BYTE_REGS) == 0)))
{
assert(intTmpReg != REG_NA);
noway_assert((genRegMask(intTmpReg) & RBM_BYTE_REGS) != 0);
if (argReg != REG_NA)
{
inst_Mov(fieldType, intTmpReg, argReg, /* canSkip */ false);
argReg = intTmpReg;
}
}
}
if (argReg == REG_NA)
{
if (m_pushStkArg)
{
if (fieldNode->isUsedFromSpillTemp())
{
assert(!varTypeIsSIMD(fieldType)); // Q: can we get here with SIMD?
assert(fieldNode->IsRegOptional());
TempDsc* tmp = getSpillTempDsc(fieldNode);
GetEmitter()->emitIns_S(INS_push, emitActualTypeSize(fieldNode->TypeGet()), tmp->tdTempNum(), 0);
regSet.tmpRlsTemp(tmp);
}
else
{
assert(varTypeIsIntegralOrI(fieldNode));
switch (fieldNode->OperGet())
{
case GT_LCL_VAR:
inst_TT(INS_push, fieldNode, 0, 0, emitActualTypeSize(fieldNode->TypeGet()));
break;
case GT_CNS_INT:
if (fieldNode->IsIconHandle())
{
inst_IV_handle(INS_push, fieldNode->AsIntCon()->gtIconVal);
}
else
{
inst_IV(INS_push, fieldNode->AsIntCon()->gtIconVal);
}
break;
default:
unreached();
}
}
currentOffset -= TARGET_POINTER_SIZE;
AddStackLevel(TARGET_POINTER_SIZE);
}
else
{
// The stack has been adjusted and we will load the field to intTmpReg and then store it on the stack.
assert(varTypeIsIntegralOrI(fieldNode));
switch (fieldNode->OperGet())
{
case GT_LCL_VAR:
inst_RV_TT(INS_mov, intTmpReg, fieldNode);
break;
case GT_CNS_INT:
genSetRegToConst(intTmpReg, fieldNode->TypeGet(), fieldNode);
break;
default:
unreached();
}
genStoreRegToStackArg(fieldType, intTmpReg, fieldOffset - currentOffset);
}
}
else
{
#if defined(FEATURE_SIMD)
if (fieldType == TYP_SIMD12)
{
assert(genIsValidFloatReg(simdTmpReg));
genStoreSIMD12ToStack(argReg, simdTmpReg);
}
else
#endif // defined(FEATURE_SIMD)
{
genStoreRegToStackArg(fieldType, argReg, fieldOffset - currentOffset);
}
if (m_pushStkArg)
{
// We always push a slot-rounded size
currentOffset -= genTypeSize(fieldType);
}
}
prevFieldOffset = fieldOffset;
}
if (currentOffset != 0)
{
// We don't expect padding at the beginning of a struct, but it could happen with explicit layout.
inst_RV_IV(INS_sub, REG_SPBASE, currentOffset, EA_PTRSIZE);
AddStackLevel(currentOffset);
}
}
#endif // TARGET_X86
//---------------------------------------------------------------------
// genPutArgStk - generate code for passing an arg on the stack.
//
// Arguments
// treeNode - the GT_PUTARG_STK node
//
void CodeGen::genPutArgStk(GenTreePutArgStk* putArgStk)
{
GenTree* data = putArgStk->gtOp1;
var_types targetType = genActualType(data->TypeGet());
#ifdef TARGET_X86
genAlignStackBeforeCall(putArgStk);
if ((data->OperGet() != GT_FIELD_LIST) && varTypeIsStruct(targetType))
{
(void)genAdjustStackForPutArgStk(putArgStk);
genPutStructArgStk(putArgStk);
return;
}
// On a 32-bit target, all of the long arguments are handled with GT_FIELD_LISTs of TYP_INT.
assert(targetType != TYP_LONG);
const unsigned argSize = putArgStk->GetStackByteSize();
assert((argSize % TARGET_POINTER_SIZE) == 0);
if (data->isContainedIntOrIImmed())
{
if (data->IsIconHandle())
{
inst_IV_handle(INS_push, data->AsIntCon()->gtIconVal);
}
else
{
inst_IV(INS_push, data->AsIntCon()->gtIconVal);
}
AddStackLevel(argSize);
}
else if (data->OperGet() == GT_FIELD_LIST)
{
genPutArgStkFieldList(putArgStk);
}
else
{
// We should not see any contained nodes that are not immediates.
assert(data->isUsedFromReg());
genConsumeReg(data);
genPushReg(targetType, data->GetRegNum());
}
#else // !TARGET_X86
{
unsigned baseVarNum = getBaseVarForPutArgStk(putArgStk);
#ifdef UNIX_AMD64_ABI
if (data->OperIs(GT_FIELD_LIST))
{
genPutArgStkFieldList(putArgStk, baseVarNum);
return;
}
else if (varTypeIsStruct(targetType))
{
m_stkArgVarNum = baseVarNum;
m_stkArgOffset = putArgStk->getArgOffset();
genPutStructArgStk(putArgStk);
m_stkArgVarNum = BAD_VAR_NUM;
return;
}
#endif // UNIX_AMD64_ABI
noway_assert(targetType != TYP_STRUCT);
// Get argument offset on stack.
// Here we cross check that argument offset hasn't changed from lowering to codegen since
// we are storing arg slot number in GT_PUTARG_STK node in lowering phase.
unsigned argOffset = putArgStk->getArgOffset();
#ifdef DEBUG
fgArgTabEntry* curArgTabEntry = compiler->gtArgEntryByNode(putArgStk->gtCall, putArgStk);
assert(curArgTabEntry != nullptr);
assert(argOffset == curArgTabEntry->slotNum * TARGET_POINTER_SIZE);
#endif
if (data->isContainedIntOrIImmed())
{
GetEmitter()->emitIns_S_I(ins_Store(targetType), emitTypeSize(targetType), baseVarNum, argOffset,
(int)data->AsIntConCommon()->IconValue());
}
else
{
assert(data->isUsedFromReg());
genConsumeReg(data);
GetEmitter()->emitIns_S_R(ins_Store(targetType), emitTypeSize(targetType), data->GetRegNum(), baseVarNum,
argOffset);
}
}
#endif // !TARGET_X86
}
//---------------------------------------------------------------------
// genPutArgReg - generate code for a GT_PUTARG_REG node
//
// Arguments
// tree - the GT_PUTARG_REG node
//
// Return value:
// None
//
void CodeGen::genPutArgReg(GenTreeOp* tree)
{
assert(tree->OperIs(GT_PUTARG_REG));
var_types targetType = tree->TypeGet();
regNumber targetReg = tree->GetRegNum();
#ifndef UNIX_AMD64_ABI
assert(targetType != TYP_STRUCT);
#endif // !UNIX_AMD64_ABI
GenTree* op1 = tree->gtOp1;
genConsumeReg(op1);
// If child node is not already in the register we need, move it
inst_Mov(targetType, targetReg, op1->GetRegNum(), /* canSkip */ true);
genProduceReg(tree);
}
#ifdef TARGET_X86
// genPushReg: Push a register value onto the stack and adjust the stack level
//
// Arguments:
// type - the type of value to be stored
// reg - the register containing the value
//
// Notes:
// For TYP_LONG, the srcReg must be a floating point register.
// Otherwise, the register type must be consistent with the given type.
//
void CodeGen::genPushReg(var_types type, regNumber srcReg)
{
unsigned size = genTypeSize(type);
if (varTypeIsIntegralOrI(type) && type != TYP_LONG)
{
assert(genIsValidIntReg(srcReg));
inst_RV(INS_push, srcReg, type);
}
else
{
instruction ins;
emitAttr attr = emitTypeSize(type);
if (type == TYP_LONG)
{
// On x86, the only way we can push a TYP_LONG from a register is if it is in an xmm reg.
// This is only used when we are pushing a struct from memory to memory, and basically is
// handling an 8-byte "chunk", as opposed to strictly a long type.
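// The sequence below then comes out roughly as (xmm0 illustrative):
//   sub  esp, 8
//   movq qword ptr [esp], xmm0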
ins = INS_movq;
}
else
{
ins = ins_Store(type);
}
assert(genIsValidFloatReg(srcReg));
inst_RV_IV(INS_sub, REG_SPBASE, size, EA_PTRSIZE);
GetEmitter()->emitIns_AR_R(ins, attr, srcReg, REG_SPBASE, 0);
}
AddStackLevel(size);
}
#endif // TARGET_X86
#if defined(FEATURE_PUT_STRUCT_ARG_STK)
// genStoreRegToStackArg: Store a register value into the stack argument area
//
// Arguments:
// type - the type of value to be stored
// reg - the register containing the value
// offset - the offset from the base (see Assumptions below)
//
// Notes:
// A type of TYP_STRUCT instructs this method to store a 16-byte chunk
// at the given offset (i.e. not the full struct).
//
// Assumptions:
// The caller must set the context appropriately before calling this method:
// - On x64, m_stkArgVarNum must be set according to whether this is a regular or tail call.
// - On x86, the caller must set m_pushStkArg if this method should push the argument.
// Otherwise, the argument is stored at the given offset from sp.
//
// TODO: In the below code the load and store instructions are for 16 bytes, but the
// type is EA_8BYTE. The movdqa/u are 16 byte instructions, so it works, but
// this probably needs to be changed.
//
void CodeGen::genStoreRegToStackArg(var_types type, regNumber srcReg, int offset)
{
assert(srcReg != REG_NA);
instruction ins;
emitAttr attr;
unsigned size;
if (type == TYP_STRUCT)
{
ins = INS_movdqu;
// This should be changed!
attr = EA_8BYTE;
size = 16;
}
else
{
#ifdef FEATURE_SIMD
if (varTypeIsSIMD(type))
{
assert(genIsValidFloatReg(srcReg));
ins = ins_Store(type); // TODO-CQ: pass 'aligned' correctly
}
else
#endif // FEATURE_SIMD
#ifdef TARGET_X86
if (type == TYP_LONG)
{
assert(genIsValidFloatReg(srcReg));
ins = INS_movq;
}
else
#endif // TARGET_X86
{
assert((varTypeUsesFloatReg(type) && genIsValidFloatReg(srcReg)) ||
(varTypeIsIntegralOrI(type) && genIsValidIntReg(srcReg)));
ins = ins_Store(type);
}
attr = emitTypeSize(type);
size = genTypeSize(type);
}
#ifdef TARGET_X86
if (m_pushStkArg)
{
genPushReg(type, srcReg);
}
else
{
GetEmitter()->emitIns_AR_R(ins, attr, srcReg, REG_SPBASE, offset);
}
#else // !TARGET_X86
assert(m_stkArgVarNum != BAD_VAR_NUM);
GetEmitter()->emitIns_S_R(ins, attr, srcReg, m_stkArgVarNum, m_stkArgOffset + offset);
#endif // !TARGET_X86
}
//---------------------------------------------------------------------
// genPutStructArgStk - generate code for copying a struct arg on the stack by value.
// In case there are references to heap object in the struct,
// it generates the gcinfo as well.
//
// Arguments
// putArgStk - the GT_PUTARG_STK node
//
// Notes:
// In the case of fixed out args, the caller must have set m_stkArgVarNum to the variable number
// corresponding to the argument area (where we will put the argument on the stack).
// For tail calls this is the baseVarNum = 0.
// For non tail calls this is the outgoingArgSpace.
//
void CodeGen::genPutStructArgStk(GenTreePutArgStk* putArgStk)
{
GenTree* source = putArgStk->gtGetOp1();
var_types targetType = source->TypeGet();
#if defined(TARGET_X86) && defined(FEATURE_SIMD)
if (putArgStk->isSIMD12())
{
genPutArgStkSIMD12(putArgStk);
return;
}
#endif // defined(TARGET_X86) && defined(FEATURE_SIMD)
if (varTypeIsSIMD(targetType))
{
regNumber srcReg = genConsumeReg(source);
assert((srcReg != REG_NA) && (genIsValidFloatReg(srcReg)));
genStoreRegToStackArg(targetType, srcReg, 0);
return;
}
assert(targetType == TYP_STRUCT);
ClassLayout* layout = source->AsObj()->GetLayout();
switch (putArgStk->gtPutArgStkKind)
{
case GenTreePutArgStk::Kind::RepInstr:
genStructPutArgRepMovs(putArgStk);
break;
#ifndef TARGET_X86
case GenTreePutArgStk::Kind::PartialRepInstr:
genStructPutArgPartialRepMovs(putArgStk);
break;
#endif // !TARGET_X86
case GenTreePutArgStk::Kind::Unroll:
genStructPutArgUnroll(putArgStk);
break;
#ifdef TARGET_X86
case GenTreePutArgStk::Kind::Push:
genStructPutArgPush(putArgStk);
break;
#endif // TARGET_X86
default:
unreached();
}
}
#endif // defined(FEATURE_PUT_STRUCT_ARG_STK)
/*****************************************************************************
*
* Create and record GC Info for the function.
*/
#ifndef JIT32_GCENCODER
void
#else // !JIT32_GCENCODER
void*
#endif // !JIT32_GCENCODER
CodeGen::genCreateAndStoreGCInfo(unsigned codeSize, unsigned prologSize, unsigned epilogSize DEBUGARG(void* codePtr))
{
#ifdef JIT32_GCENCODER
return genCreateAndStoreGCInfoJIT32(codeSize, prologSize, epilogSize DEBUGARG(codePtr));
#else // !JIT32_GCENCODER
genCreateAndStoreGCInfoX64(codeSize, prologSize DEBUGARG(codePtr));
#endif // !JIT32_GCENCODER
}
#ifdef JIT32_GCENCODER
void* CodeGen::genCreateAndStoreGCInfoJIT32(unsigned codeSize,
unsigned prologSize,
unsigned epilogSize DEBUGARG(void* codePtr))
{
BYTE headerBuf[64];
InfoHdr header;
int s_cached;
#ifdef FEATURE_EH_FUNCLETS
// We should do this before gcInfoBlockHdrSave since varPtrTableSize must be finalized before it
if (compiler->ehAnyFunclets())
{
gcInfo.gcMarkFilterVarsPinned();
}
#endif
#ifdef DEBUG
size_t headerSize =
#endif
compiler->compInfoBlkSize =
gcInfo.gcInfoBlockHdrSave(headerBuf, 0, codeSize, prologSize, epilogSize, &header, &s_cached);
size_t argTabOffset = 0;
size_t ptrMapSize = gcInfo.gcPtrTableSize(header, codeSize, &argTabOffset);
#if DISPLAY_SIZES
if (GetInterruptible())
{
gcHeaderISize += compiler->compInfoBlkSize;
gcPtrMapISize += ptrMapSize;
}
else
{
gcHeaderNSize += compiler->compInfoBlkSize;
gcPtrMapNSize += ptrMapSize;
}
#endif // DISPLAY_SIZES
compiler->compInfoBlkSize += ptrMapSize;
/* Allocate the info block for the method */
compiler->compInfoBlkAddr = (BYTE*)compiler->info.compCompHnd->allocGCInfo(compiler->compInfoBlkSize);
#if 0 // VERBOSE_SIZES
// TODO-X86-Cleanup: 'dataSize', below, is not defined
// if (compiler->compInfoBlkSize > codeSize && compiler->compInfoBlkSize > 100)
{
printf("[%7u VM, %7u+%7u/%7u x86 %03u/%03u%%] %s.%s\n",
compiler->info.compILCodeSize,
compiler->compInfoBlkSize,
codeSize + dataSize,
codeSize + dataSize - prologSize - epilogSize,
100 * (codeSize + dataSize) / compiler->info.compILCodeSize,
100 * (codeSize + dataSize + compiler->compInfoBlkSize) / compiler->info.compILCodeSize,
compiler->info.compClassName,
compiler->info.compMethodName);
}
#endif
/* Fill in the info block and return it to the caller */
void* infoPtr = compiler->compInfoBlkAddr;
/* Create the method info block: header followed by GC tracking tables */
compiler->compInfoBlkAddr +=
gcInfo.gcInfoBlockHdrSave(compiler->compInfoBlkAddr, -1, codeSize, prologSize, epilogSize, &header, &s_cached);
assert(compiler->compInfoBlkAddr == (BYTE*)infoPtr + headerSize);
compiler->compInfoBlkAddr = gcInfo.gcPtrTableSave(compiler->compInfoBlkAddr, header, codeSize, &argTabOffset);
assert(compiler->compInfoBlkAddr == (BYTE*)infoPtr + headerSize + ptrMapSize);
#ifdef DEBUG
if (0)
{
BYTE* temp = (BYTE*)infoPtr;
size_t size = compiler->compInfoBlkAddr - temp;
BYTE* ptab = temp + headerSize;
noway_assert(size == headerSize + ptrMapSize);
printf("Method info block - header [%zu bytes]:", headerSize);
for (unsigned i = 0; i < size; i++)
{
if (temp == ptab)
{
printf("\nMethod info block - ptrtab [%u bytes]:", ptrMapSize);
printf("\n %04X: %*c", i & ~0xF, 3 * (i & 0xF), ' ');
}
else
{
if (!(i % 16))
printf("\n %04X: ", i);
}
printf("%02X ", *temp++);
}
printf("\n");
}
#endif // DEBUG
#if DUMP_GC_TABLES
if (compiler->opts.dspGCtbls)
{
const BYTE* base = (BYTE*)infoPtr;
size_t size;
unsigned methodSize;
InfoHdr dumpHeader;
printf("GC Info for method %s\n", compiler->info.compFullName);
printf("GC info size = %3u\n", compiler->compInfoBlkSize);
size = gcInfo.gcInfoBlockHdrDump(base, &dumpHeader, &methodSize);
// printf("size of header encoding is %3u\n", size);
printf("\n");
if (compiler->opts.dspGCtbls)
{
base += size;
size = gcInfo.gcDumpPtrTable(base, dumpHeader, methodSize);
// printf("size of pointer table is %3u\n", size);
printf("\n");
noway_assert(compiler->compInfoBlkAddr == (base + size));
}
}
#endif // DUMP_GC_TABLES
/* Make sure we ended up generating the expected number of bytes */
noway_assert(compiler->compInfoBlkAddr == (BYTE*)infoPtr + compiler->compInfoBlkSize);
return infoPtr;
}
#else // !JIT32_GCENCODER
void CodeGen::genCreateAndStoreGCInfoX64(unsigned codeSize, unsigned prologSize DEBUGARG(void* codePtr))
{
IAllocator* allowZeroAlloc = new (compiler, CMK_GC) CompIAllocator(compiler->getAllocatorGC());
GcInfoEncoder* gcInfoEncoder = new (compiler, CMK_GC)
GcInfoEncoder(compiler->info.compCompHnd, compiler->info.compMethodInfo, allowZeroAlloc, NOMEM);
assert(gcInfoEncoder);
// Follow the code pattern of the x86 gc info encoder (genCreateAndStoreGCInfoJIT32).
gcInfo.gcInfoBlockHdrSave(gcInfoEncoder, codeSize, prologSize);
// We keep the call count for the second call to gcMakeRegPtrTable() below.
unsigned callCnt = 0;
// First we figure out the encoder ID's for the stack slots and registers.
gcInfo.gcMakeRegPtrTable(gcInfoEncoder, codeSize, prologSize, GCInfo::MAKE_REG_PTR_MODE_ASSIGN_SLOTS, &callCnt);
// Now we've requested all the slots we'll need; "finalize" these (make more compact data structures for them).
gcInfoEncoder->FinalizeSlotIds();
// Now we can actually use those slot ID's to declare live ranges.
gcInfo.gcMakeRegPtrTable(gcInfoEncoder, codeSize, prologSize, GCInfo::MAKE_REG_PTR_MODE_DO_WORK, &callCnt);
if (compiler->opts.compDbgEnC)
{
// what we have to preserve is called the "frame header" (see comments in VM\eetwain.cpp)
// which is:
// -return address
// -saved off RBP
// -saved 'this' pointer and bool for synchronized methods
// 4 slots for RBP + return address + RSI + RDI
int preservedAreaSize = 4 * REGSIZE_BYTES;
if (compiler->info.compFlags & CORINFO_FLG_SYNCH)
{
if (!(compiler->info.compFlags & CORINFO_FLG_STATIC))
{
preservedAreaSize += REGSIZE_BYTES;
}
// bool in synchronized methods that tracks whether the lock has been taken (takes 4 bytes on stack)
preservedAreaSize += 4;
}
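// E.g. (illustrative, assuming x64 REGSIZE_BYTES == 8): a synchronized instance method
// reports 4 * 8 + 8 + 4 = 44 bytes of preserved area.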
// Used to signal both that the method is compiled for EnC, and also the size of the block at the top of the
// frame
gcInfoEncoder->SetSizeOfEditAndContinuePreservedArea(preservedAreaSize);
}
if (compiler->opts.IsReversePInvoke())
{
unsigned reversePInvokeFrameVarNumber = compiler->lvaReversePInvokeFrameVar;
assert(reversePInvokeFrameVarNumber != BAD_VAR_NUM);
const LclVarDsc* reversePInvokeFrameVar = compiler->lvaGetDesc(reversePInvokeFrameVarNumber);
gcInfoEncoder->SetReversePInvokeFrameSlot(reversePInvokeFrameVar->GetStackOffset());
}
gcInfoEncoder->Build();
// GC Encoder automatically puts the GC info in the right spot using ICorJitInfo::allocGCInfo(size_t)
// let's save the values anyway for debugging purposes
compiler->compInfoBlkAddr = gcInfoEncoder->Emit();
compiler->compInfoBlkSize = 0; // not exposed by the GCEncoder interface
}
#endif // !JIT32_GCENCODER
/*****************************************************************************
* Emit a call to a helper function.
*
*/
void CodeGen::genEmitHelperCall(unsigned helper, int argSize, emitAttr retSize, regNumber callTargetReg)
{
void* addr = nullptr;
void* pAddr = nullptr;
emitter::EmitCallType callType = emitter::EC_FUNC_TOKEN;
addr = compiler->compGetHelperFtn((CorInfoHelpFunc)helper, &pAddr);
regNumber callTarget = REG_NA;
regMaskTP killMask = compiler->compHelperCallKillSet((CorInfoHelpFunc)helper);
if (!addr)
{
assert(pAddr != nullptr);
// Absolute indirect call addr
// Note: Order of checks is important. First always check for pc-relative and next
// zero-relative. Because the former encoding is 1-byte smaller than the latter.
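// (Illustratively, on x64 the pc-relative form is "call [rip+disp32]" while the zero-relative
// form is "call [disp32]", which needs an extra SIB byte in its encoding.)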
if (genCodeIndirAddrCanBeEncodedAsPCRelOffset((size_t)pAddr) ||
genCodeIndirAddrCanBeEncodedAsZeroRelOffset((size_t)pAddr))
{
// generate call whose target is specified by 32-bit offset relative to PC or zero.
callType = emitter::EC_FUNC_TOKEN_INDIR;
addr = pAddr;
}
else
{
#ifdef TARGET_AMD64
// If this indirect address cannot be encoded as 32-bit offset relative to PC or Zero,
// load it into REG_HELPER_CALL_TARGET and use register indirect addressing mode to
// make the call.
// mov reg, addr
// call [reg]
if (callTargetReg == REG_NA)
{
// If a callTargetReg has not been explicitly provided, we will use REG_DEFAULT_HELPER_CALL_TARGET, but
// this is only a valid assumption if the helper call is known to kill REG_DEFAULT_HELPER_CALL_TARGET.
callTargetReg = REG_DEFAULT_HELPER_CALL_TARGET;
regMaskTP callTargetMask = genRegMask(callTargetReg);
noway_assert((callTargetMask & killMask) == callTargetMask);
}
else
{
// The explicitly provided call target must not overwrite any live variable, even
// though it may not be in the kill set for the call.
regMaskTP callTargetMask = genRegMask(callTargetReg);
noway_assert((callTargetMask & regSet.GetMaskVars()) == RBM_NONE);
}
#endif
callTarget = callTargetReg;
instGen_Set_Reg_To_Imm(EA_HANDLE_CNS_RELOC, callTarget, (ssize_t)pAddr);
callType = emitter::EC_INDIR_ARD;
}
}
// clang-format off
GetEmitter()->emitIns_Call(callType,
compiler->eeFindHelper(helper),
INDEBUG_LDISASM_COMMA(nullptr) addr,
argSize,
retSize
MULTIREG_HAS_SECOND_GC_RET_ONLY_ARG(EA_UNKNOWN),
gcInfo.gcVarPtrSetCur,
gcInfo.gcRegGCrefSetCur,
gcInfo.gcRegByrefSetCur,
DebugInfo(),
callTarget, // ireg
REG_NA, 0, 0, // xreg, xmul, disp
false // isJump
);
// clang-format on
regSet.verifyRegistersUsed(killMask);
}
/*****************************************************************************
* Unit testing of the XArch emitter: generate a bunch of instructions into the prolog
* (it's as good a place as any), then use COMPlus_JitLateDisasm=* to see if the late
* disassembler interprets the instructions the same way we do.
*/
// Uncomment "#define ALL_ARM64_EMITTER_UNIT_TESTS" to run all the unit tests here.
// After adding a unit test, and verifying it works, put it under this #ifdef, so we don't see it run every time.
//#define ALL_XARCH_EMITTER_UNIT_TESTS
#if defined(DEBUG) && defined(LATE_DISASM) && defined(TARGET_AMD64)
void CodeGen::genAmd64EmitterUnitTests()
{
if (!verbose)
{
return;
}
if (!compiler->opts.altJit)
{
// No point doing this in a "real" JIT.
return;
}
// Mark the "fake" instructions in the output.
printf("*************** In genAmd64EmitterUnitTests()\n");
// We use this:
// genDefineTempLabel(genCreateTempLabel());
// to create artificial labels to help separate groups of tests.
//
// Loads
//
CLANG_FORMAT_COMMENT_ANCHOR;
#ifdef ALL_XARCH_EMITTER_UNIT_TESTS
genDefineTempLabel(genCreateTempLabel());
// vhaddpd ymm0,ymm1,ymm2
GetEmitter()->emitIns_R_R_R(INS_haddpd, EA_32BYTE, REG_XMM0, REG_XMM1, REG_XMM2);
// vaddss xmm0,xmm1,xmm2
GetEmitter()->emitIns_R_R_R(INS_addss, EA_4BYTE, REG_XMM0, REG_XMM1, REG_XMM2);
// vaddsd xmm0,xmm1,xmm2
GetEmitter()->emitIns_R_R_R(INS_addsd, EA_8BYTE, REG_XMM0, REG_XMM1, REG_XMM2);
// vaddps xmm0,xmm1,xmm2
GetEmitter()->emitIns_R_R_R(INS_addps, EA_16BYTE, REG_XMM0, REG_XMM1, REG_XMM2);
// vaddps ymm0,ymm1,ymm2
GetEmitter()->emitIns_R_R_R(INS_addps, EA_32BYTE, REG_XMM0, REG_XMM1, REG_XMM2);
// vaddpd xmm0,xmm1,xmm2
GetEmitter()->emitIns_R_R_R(INS_addpd, EA_16BYTE, REG_XMM0, REG_XMM1, REG_XMM2);
// vaddpd ymm0,ymm1,ymm2
GetEmitter()->emitIns_R_R_R(INS_addpd, EA_32BYTE, REG_XMM0, REG_XMM1, REG_XMM2);
// vsubss xmm0,xmm1,xmm2
GetEmitter()->emitIns_R_R_R(INS_subss, EA_4BYTE, REG_XMM0, REG_XMM1, REG_XMM2);
// vsubsd xmm0,xmm1,xmm2
GetEmitter()->emitIns_R_R_R(INS_subsd, EA_8BYTE, REG_XMM0, REG_XMM1, REG_XMM2);
// vsubps xmm0,xmm1,xmm2
GetEmitter()->emitIns_R_R_R(INS_subps, EA_16BYTE, REG_XMM0, REG_XMM1, REG_XMM2);
// vsubps ymm0,ymm1,ymm2
GetEmitter()->emitIns_R_R_R(INS_subps, EA_32BYTE, REG_XMM0, REG_XMM1, REG_XMM2);
// vsubpd xmm0,xmm1,xmm2
GetEmitter()->emitIns_R_R_R(INS_subpd, EA_16BYTE, REG_XMM0, REG_XMM1, REG_XMM2);
// vsubpd ymm0,ymm1,ymm2
GetEmitter()->emitIns_R_R_R(INS_subpd, EA_32BYTE, REG_XMM0, REG_XMM1, REG_XMM2);
// vmulss xmm0,xmm1,xmm2
GetEmitter()->emitIns_R_R_R(INS_mulss, EA_4BYTE, REG_XMM0, REG_XMM1, REG_XMM2);
// vmulsd xmm0,xmm1,xmm2
GetEmitter()->emitIns_R_R_R(INS_mulsd, EA_8BYTE, REG_XMM0, REG_XMM1, REG_XMM2);
// vmulps xmm0,xmm1,xmm2
GetEmitter()->emitIns_R_R_R(INS_mulps, EA_16BYTE, REG_XMM0, REG_XMM1, REG_XMM2);
// vmulpd xmm0,xmm1,xmm2
GetEmitter()->emitIns_R_R_R(INS_mulpd, EA_16BYTE, REG_XMM0, REG_XMM1, REG_XMM2);
// vmulps ymm0,ymm1,ymm2
GetEmitter()->emitIns_R_R_R(INS_mulps, EA_32BYTE, REG_XMM0, REG_XMM1, REG_XMM2);
// vmulpd ymm0,ymm1,ymm2
GetEmitter()->emitIns_R_R_R(INS_mulpd, EA_32BYTE, REG_XMM0, REG_XMM1, REG_XMM2);
// vandps xmm0,xmm1,xmm2
GetEmitter()->emitIns_R_R_R(INS_andps, EA_16BYTE, REG_XMM0, REG_XMM1, REG_XMM2);
// vandpd xmm0,xmm1,xmm2
GetEmitter()->emitIns_R_R_R(INS_andpd, EA_16BYTE, REG_XMM0, REG_XMM1, REG_XMM2);
// vandps ymm0,ymm1,ymm2
GetEmitter()->emitIns_R_R_R(INS_andps, EA_32BYTE, REG_XMM0, REG_XMM1, REG_XMM2);
// vandpd ymm0,ymm1,ymm2
GetEmitter()->emitIns_R_R_R(INS_andpd, EA_32BYTE, REG_XMM0, REG_XMM1, REG_XMM2);
// vorps xmm0,xmm1,xmm2
GetEmitter()->emitIns_R_R_R(INS_orps, EA_16BYTE, REG_XMM0, REG_XMM1, REG_XMM2);
// vorpd xmm0,xmm1,xmm2
GetEmitter()->emitIns_R_R_R(INS_orpd, EA_16BYTE, REG_XMM0, REG_XMM1, REG_XMM2);
// vorps ymm0,ymm1,ymm2
GetEmitter()->emitIns_R_R_R(INS_orps, EA_32BYTE, REG_XMM0, REG_XMM1, REG_XMM2);
// vorpd ymm0,ymm1,ymm2
GetEmitter()->emitIns_R_R_R(INS_orpd, EA_32BYTE, REG_XMM0, REG_XMM1, REG_XMM2);
// vdivss xmm0,xmm1,xmm2
GetEmitter()->emitIns_R_R_R(INS_divss, EA_4BYTE, REG_XMM0, REG_XMM1, REG_XMM2);
// vdivsd xmm0,xmm1,xmm2
GetEmitter()->emitIns_R_R_R(INS_divsd, EA_8BYTE, REG_XMM0, REG_XMM1, REG_XMM2);
// vdivss xmm0,xmm1,xmm2
GetEmitter()->emitIns_R_R_R(INS_divss, EA_4BYTE, REG_XMM0, REG_XMM1, REG_XMM2);
// vdivsd xmm0,xmm1,xmm2
GetEmitter()->emitIns_R_R_R(INS_divsd, EA_8BYTE, REG_XMM0, REG_XMM1, REG_XMM2);
// vcvtss2sd xmm0,xmm1,xmm2
GetEmitter()->emitIns_R_R_R(INS_cvtss2sd, EA_4BYTE, REG_XMM0, REG_XMM1, REG_XMM2);
// vcvtsd2ss xmm0,xmm1,xmm2
GetEmitter()->emitIns_R_R_R(INS_cvtsd2ss, EA_8BYTE, REG_XMM0, REG_XMM1, REG_XMM2);
#endif // ALL_XARCH_EMITTER_UNIT_TESTS
printf("*************** End of genAmd64EmitterUnitTests()\n");
}
#endif // defined(DEBUG) && defined(LATE_DISASM) && defined(TARGET_AMD64)
#ifdef PROFILING_SUPPORTED
#ifdef TARGET_X86
//-----------------------------------------------------------------------------------
// genProfilingEnterCallback: Generate the profiling function enter callback.
//
// Arguments:
// initReg - register to use as scratch register
// pInitRegZeroed - OUT parameter. This variable remains unchanged.
//
// Return Value:
// None
//
// Notes:
// The x86 profile enter helper has the following requirements (see ProfileEnterNaked in
// VM\i386\asmhelpers.asm for details):
// 1. The calling sequence for calling the helper is:
// push FunctionIDOrClientID
// call ProfileEnterHelper
// 2. The calling function has an EBP frame.
// 3. EBP points to the saved ESP which is the first thing saved in the function. Thus,
// the following prolog is assumed:
// push ESP
// mov EBP, ESP
// 4. All registers are preserved.
// 5. The helper pops the FunctionIDOrClientID argument from the stack.
//
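// The emitted probe is therefore roughly:
//     push <profilerHandle>        ; or push dword ptr [handleAddr] when indirected
//     call CORINFO_HELP_PROF_FCN_ENTER
// with the helper popping the handle on return.
//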
void CodeGen::genProfilingEnterCallback(regNumber initReg, bool* pInitRegZeroed)
{
assert(compiler->compGeneratingProlog);
// Give profiler a chance to back out of hooking this method
if (!compiler->compIsProfilerHookNeeded())
{
return;
}
unsigned saveStackLvl2 = genStackLevel;
// Important note: when you change enter probe layout, you must also update SKIP_ENTER_PROF_CALLBACK()
// for x86 stack unwinding
#if defined(UNIX_X86_ABI)
// Manually align the stack to be 16-byte aligned. This is similar to CodeGen::genAlignStackBeforeCall()
GetEmitter()->emitIns_R_I(INS_sub, EA_4BYTE, REG_SPBASE, 0xC);
#endif // UNIX_X86_ABI
// Push the profilerHandle
if (compiler->compProfilerMethHndIndirected)
{
GetEmitter()->emitIns_AR_R(INS_push, EA_PTR_DSP_RELOC, REG_NA, REG_NA, (ssize_t)compiler->compProfilerMethHnd);
}
else
{
inst_IV(INS_push, (size_t)compiler->compProfilerMethHnd);
}
// This will emit either
// "call ip-relative 32-bit offset" or
// "mov rax, helper addr; call rax"
genEmitHelperCall(CORINFO_HELP_PROF_FCN_ENTER,
0, // argSize. Again, we have to lie about it
EA_UNKNOWN); // retSize
// Check that we have space for the push.
assert(compiler->fgGetPtrArgCntMax() >= 1);
#if defined(UNIX_X86_ABI)
// Restoring alignment manually. This is similar to CodeGen::genRemoveAlignmentAfterCall
GetEmitter()->emitIns_R_I(INS_add, EA_4BYTE, REG_SPBASE, 0x10);
#endif // UNIX_X86_ABI
/* Restore the stack level */
SetStackLevel(saveStackLvl2);
}
//-----------------------------------------------------------------------------------
// genProfilingLeaveCallback: Generate the profiling function leave or tailcall callback.
// Technically, this is not part of the epilog; it is called when we are generating code for a GT_RETURN node.
//
// Arguments:
// helper - which helper to call. Either CORINFO_HELP_PROF_FCN_LEAVE or CORINFO_HELP_PROF_FCN_TAILCALL
//
// Return Value:
// None
//
// Notes:
// The x86 profile leave/tailcall helper has the following requirements (see ProfileLeaveNaked and
// ProfileTailcallNaked in VM\i386\asmhelpers.asm for details):
// 1. The calling sequence for calling the helper is:
// push FunctionIDOrClientID
// call ProfileLeaveHelper or ProfileTailcallHelper
// 2. The calling function has an EBP frame.
// 3. EBP points to the saved ESP which is the first thing saved in the function. Thus,
// the following prolog is assumed:
// push ESP
// mov EBP, ESP
// 4. helper == CORINFO_HELP_PROF_FCN_LEAVE: All registers are preserved.
// helper == CORINFO_HELP_PROF_FCN_TAILCALL: Only argument registers are preserved.
// 5. The helper pops the FunctionIDOrClientID argument from the stack.
//
void CodeGen::genProfilingLeaveCallback(unsigned helper)
{
assert((helper == CORINFO_HELP_PROF_FCN_LEAVE) || (helper == CORINFO_HELP_PROF_FCN_TAILCALL));
// Only hook if profiler says it's okay.
if (!compiler->compIsProfilerHookNeeded())
{
return;
}
compiler->info.compProfilerCallback = true;
// Need to save the current stack level, since the helper call will pop the argument
unsigned saveStackLvl2 = genStackLevel;
#if defined(UNIX_X86_ABI)
// Manually align the stack to be 16-byte aligned. This is similar to CodeGen::genAlignStackBeforeCall()
GetEmitter()->emitIns_R_I(INS_sub, EA_4BYTE, REG_SPBASE, 0xC);
AddStackLevel(0xC);
AddNestedAlignment(0xC);
#endif // UNIX_X86_ABI
//
// Push the profilerHandle
//
if (compiler->compProfilerMethHndIndirected)
{
GetEmitter()->emitIns_AR_R(INS_push, EA_PTR_DSP_RELOC, REG_NA, REG_NA, (ssize_t)compiler->compProfilerMethHnd);
}
else
{
inst_IV(INS_push, (size_t)compiler->compProfilerMethHnd);
}
genSinglePush();
#if defined(UNIX_X86_ABI)
int argSize = -REGSIZE_BYTES; // negative means caller-pop (cdecl)
#else
int argSize = REGSIZE_BYTES;
#endif
genEmitHelperCall(helper, argSize, EA_UNKNOWN /* retSize */);
// Check that we have space for the push.
assert(compiler->fgGetPtrArgCntMax() >= 1);
#if defined(UNIX_X86_ABI)
// Restoring alignment manually. This is similar to CodeGen::genRemoveAlignmentAfterCall
GetEmitter()->emitIns_R_I(INS_add, EA_4BYTE, REG_SPBASE, 0x10);
SubtractStackLevel(0x10);
SubtractNestedAlignment(0xC);
#endif // UNIX_X86_ABI
/* Restore the stack level */
SetStackLevel(saveStackLvl2);
}
#endif // TARGET_X86
#ifdef TARGET_AMD64
//-----------------------------------------------------------------------------------
// genProfilingEnterCallback: Generate the profiling function enter callback.
//
// Arguments:
// initReg - register to use as scratch register
// pInitRegZeroed - OUT parameter. *pInitRegZeroed is set to 'false' if and only if
// this call sets 'initReg' to a non-zero value.
//
// Return Value:
// None
//
void CodeGen::genProfilingEnterCallback(regNumber initReg, bool* pInitRegZeroed)
{
assert(compiler->compGeneratingProlog);
// Give profiler a chance to back out of hooking this method
if (!compiler->compIsProfilerHookNeeded())
{
return;
}
#if !defined(UNIX_AMD64_ABI)
unsigned varNum;
LclVarDsc* varDsc;
// Since the method needs to make a profiler callback, it should have out-going arg space allocated.
noway_assert(compiler->lvaOutgoingArgSpaceVar != BAD_VAR_NUM);
noway_assert(compiler->lvaOutgoingArgSpaceSize >= (4 * REGSIZE_BYTES));
// Home all arguments passed in arg registers (RCX, RDX, R8 and R9).
// In case of vararg methods, arg regs are already homed.
//
// Note: Here we don't need to worry about updating GC info since the enter
// callback is generated as part of the prolog, which is non-GC interruptible.
// Moreover, a GC cannot kick in while executing inside the profiler callback, which is a
// profiler requirement so that it can examine arguments which could be obj refs.
if (!compiler->info.compIsVarArgs)
{
for (varNum = 0, varDsc = compiler->lvaTable; varNum < compiler->info.compArgsCount; varNum++, varDsc++)
{
noway_assert(varDsc->lvIsParam);
if (!varDsc->lvIsRegArg)
{
continue;
}
var_types storeType = varDsc->GetRegisterType();
regNumber argReg = varDsc->GetArgReg();
instruction store_ins = ins_Store(storeType);
#ifdef FEATURE_SIMD
if ((storeType == TYP_SIMD8) && genIsValidIntReg(argReg))
{
store_ins = INS_mov;
}
#endif // FEATURE_SIMD
GetEmitter()->emitIns_S_R(store_ins, emitTypeSize(storeType), argReg, varNum, 0);
}
}
// Emit profiler EnterCallback(ProfilerMethHnd, caller's SP)
// RCX = ProfilerMethHnd
if (compiler->compProfilerMethHndIndirected)
{
// Profiler hooks enabled during Ngen time.
// Profiler handle needs to be accessed through an indirection of a pointer.
GetEmitter()->emitIns_R_AI(INS_mov, EA_PTR_DSP_RELOC, REG_ARG_0, (ssize_t)compiler->compProfilerMethHnd);
}
else
{
instGen_Set_Reg_To_Imm(EA_8BYTE, REG_ARG_0, (ssize_t)compiler->compProfilerMethHnd);
}
// RDX = caller's SP
// Notes
// 1) Here we can query caller's SP offset since prolog will be generated after final frame layout.
// 2) caller's SP relative offset to FramePointer will be negative. We need to add absolute value
// of that offset to FramePointer to obtain caller's SP value.
assert(compiler->lvaOutgoingArgSpaceVar != BAD_VAR_NUM);
int callerSPOffset = compiler->lvaToCallerSPRelativeOffset(0, isFramePointerUsed());
GetEmitter()->emitIns_R_AR(INS_lea, EA_PTRSIZE, REG_ARG_1, genFramePointerReg(), -callerSPOffset);
// This will emit either
// "call ip-relative 32-bit offset" or
// "mov rax, helper addr; call rax"
genEmitHelperCall(CORINFO_HELP_PROF_FCN_ENTER, 0, EA_UNKNOWN);
// TODO-AMD64-CQ: Rather than reloading, see if this could be optimized by combining with prolog
// generation logic that moves args around as required by first BB entry point conditions
// computed by LSRA. Code pointers for investigating this further: genFnPrologCalleeRegArgs()
// and genEnregisterIncomingStackArgs().
//
// Now reload arg registers from home locations.
// Vararg methods:
// - we need to reload only known (i.e. fixed) reg args.
// - if floating point type, also reload it into corresponding integer reg
for (varNum = 0, varDsc = compiler->lvaTable; varNum < compiler->info.compArgsCount; varNum++, varDsc++)
{
noway_assert(varDsc->lvIsParam);
if (!varDsc->lvIsRegArg)
{
continue;
}
var_types loadType = varDsc->GetRegisterType();
regNumber argReg = varDsc->GetArgReg();
instruction load_ins = ins_Load(loadType);
#ifdef FEATURE_SIMD
if ((loadType == TYP_SIMD8) && genIsValidIntReg(argReg))
{
load_ins = INS_mov;
}
#endif // FEATURE_SIMD
GetEmitter()->emitIns_R_S(load_ins, emitTypeSize(loadType), argReg, varNum, 0);
if (compFeatureVarArg() && compiler->info.compIsVarArgs && varTypeIsFloating(loadType))
{
regNumber intArgReg = compiler->getCallArgIntRegister(argReg);
inst_Mov(TYP_LONG, intArgReg, argReg, /* canSkip */ false, emitActualTypeSize(loadType));
}
}
// If initReg is one of RBM_CALLEE_TRASH, then it needs to be zero'ed before using.
if ((RBM_CALLEE_TRASH & genRegMask(initReg)) != 0)
{
*pInitRegZeroed = false;
}
#else // !defined(UNIX_AMD64_ABI)
// Emit profiler EnterCallback(ProfilerMethHnd, caller's SP)
// R14 = ProfilerMethHnd
if (compiler->compProfilerMethHndIndirected)
{
// Profiler hooks enabled during Ngen time.
// Profiler handle needs to be accessed through an indirection of a pointer.
GetEmitter()->emitIns_R_AI(INS_mov, EA_PTR_DSP_RELOC, REG_PROFILER_ENTER_ARG_0,
(ssize_t)compiler->compProfilerMethHnd);
}
else
{
instGen_Set_Reg_To_Imm(EA_PTRSIZE, REG_PROFILER_ENTER_ARG_0, (ssize_t)compiler->compProfilerMethHnd);
}
// R15 = caller's SP
// Notes
// 1) Here we can query caller's SP offset since prolog will be generated after final frame layout.
// 2) caller's SP relative offset to FramePointer will be negative. We need to add absolute value
// of that offset to FramePointer to obtain caller's SP value.
assert(compiler->lvaOutgoingArgSpaceVar != BAD_VAR_NUM);
int callerSPOffset = compiler->lvaToCallerSPRelativeOffset(0, isFramePointerUsed());
GetEmitter()->emitIns_R_AR(INS_lea, EA_PTRSIZE, REG_PROFILER_ENTER_ARG_1, genFramePointerReg(), -callerSPOffset);
// We can use any callee trash register (other than RAX, RDI, RSI) for call target.
// We use R11 here. This will emit either
// "call ip-relative 32-bit offset" or
// "mov r11, helper addr; call r11"
genEmitHelperCall(CORINFO_HELP_PROF_FCN_ENTER, 0, EA_UNKNOWN, REG_DEFAULT_PROFILER_CALL_TARGET);
// If initReg is one of RBM_CALLEE_TRASH, then it needs to be zero'ed before using.
if ((RBM_CALLEE_TRASH & genRegMask(initReg)) != 0)
{
*pInitRegZeroed = false;
}
#endif // !defined(UNIX_AMD64_ABI)
}
//-----------------------------------------------------------------------------------
// genProfilingLeaveCallback: Generate the profiling function leave or tailcall callback.
// Technically, this is not part of the epilog; it is called when we are generating code for a GT_RETURN node.
//
// Arguments:
// helper - which helper to call. Either CORINFO_HELP_PROF_FCN_LEAVE or CORINFO_HELP_PROF_FCN_TAILCALL
//
// Return Value:
// None
//
void CodeGen::genProfilingLeaveCallback(unsigned helper)
{
assert((helper == CORINFO_HELP_PROF_FCN_LEAVE) || (helper == CORINFO_HELP_PROF_FCN_TAILCALL));
// Only hook if profiler says it's okay.
if (!compiler->compIsProfilerHookNeeded())
{
return;
}
compiler->info.compProfilerCallback = true;
#if !defined(UNIX_AMD64_ABI)
// Since the method needs to make a profiler callback, it should have out-going arg space allocated.
noway_assert(compiler->lvaOutgoingArgSpaceVar != BAD_VAR_NUM);
noway_assert(compiler->lvaOutgoingArgSpaceSize >= (4 * REGSIZE_BYTES));
// If thisPtr needs to be kept alive and reported, it cannot be one of the callee trash
// registers that profiler callback kills.
if (compiler->lvaKeepAliveAndReportThis() && compiler->lvaGetDesc(compiler->info.compThisArg)->lvIsInReg())
{
regMaskTP thisPtrMask = genRegMask(compiler->lvaGetDesc(compiler->info.compThisArg)->GetRegNum());
noway_assert((RBM_PROFILER_LEAVE_TRASH & thisPtrMask) == 0);
}
// At this point the return value has been computed and stored in RAX or XMM0.
// On Amd64, the Leave callback preserves the return register. We keep
// RAX alive by not reporting it as trashed by the helper call. Also note
// that a GC cannot kick in while executing inside the profiler callback,
// which is also a profiler requirement since it needs to examine the
// return value, which could be an obj ref.
// RCX = ProfilerMethHnd
if (compiler->compProfilerMethHndIndirected)
{
// Profiler hooks enabled during Ngen time.
// Profiler handle needs to be accessed through an indirection of an address.
GetEmitter()->emitIns_R_AI(INS_mov, EA_PTR_DSP_RELOC, REG_ARG_0, (ssize_t)compiler->compProfilerMethHnd);
}
else
{
instGen_Set_Reg_To_Imm(EA_PTRSIZE, REG_ARG_0, (ssize_t)compiler->compProfilerMethHnd);
}
// RDX = caller's SP
// TODO-AMD64-Cleanup: Once we start doing codegen after final frame layout, retain the "if" portion
// of the stmnts to execute unconditionally and clean-up rest.
if (compiler->lvaDoneFrameLayout == Compiler::FINAL_FRAME_LAYOUT)
{
// Caller's SP relative offset to FramePointer will be negative. We need to add absolute
// value of that offset to FramePointer to obtain caller's SP value.
int callerSPOffset = compiler->lvaToCallerSPRelativeOffset(0, isFramePointerUsed());
GetEmitter()->emitIns_R_AR(INS_lea, EA_PTRSIZE, REG_ARG_1, genFramePointerReg(), -callerSPOffset);
}
else
{
// If we are here means that it is a tentative frame layout during which we
// cannot use caller's SP offset since it is an estimate. For now we require the
// method to have at least a single arg so that we can use it to obtain caller's
// SP.
LclVarDsc* varDsc = compiler->lvaGetDesc(0U);
NYI_IF((varDsc == nullptr) || !varDsc->lvIsParam, "Profiler ELT callback for a method without any params");
// lea rdx, [FramePointer + Arg0's offset]
GetEmitter()->emitIns_R_S(INS_lea, EA_PTRSIZE, REG_ARG_1, 0, 0);
}
// We can use any callee trash register (other than RAX, RCX, RDX) for call target.
// We use R8 here. This will emit either
// "call ip-relative 32-bit offset" or
// "mov r8, helper addr; call r8"
genEmitHelperCall(helper, 0, EA_UNKNOWN, REG_ARG_2);
#else // !defined(UNIX_AMD64_ABI)
// RDI = ProfilerMethHnd
if (compiler->compProfilerMethHndIndirected)
{
GetEmitter()->emitIns_R_AI(INS_mov, EA_PTR_DSP_RELOC, REG_ARG_0, (ssize_t)compiler->compProfilerMethHnd);
}
else
{
instGen_Set_Reg_To_Imm(EA_PTRSIZE, REG_ARG_0, (ssize_t)compiler->compProfilerMethHnd);
}
// RSI = caller's SP
if (compiler->lvaDoneFrameLayout == Compiler::FINAL_FRAME_LAYOUT)
{
int callerSPOffset = compiler->lvaToCallerSPRelativeOffset(0, isFramePointerUsed());
GetEmitter()->emitIns_R_AR(INS_lea, EA_PTRSIZE, REG_ARG_1, genFramePointerReg(), -callerSPOffset);
}
else
{
LclVarDsc* varDsc = compiler->lvaGetDesc(0U);
NYI_IF((varDsc == nullptr) || !varDsc->lvIsParam, "Profiler ELT callback for a method without any params");
// lea rsi, [FramePointer + Arg0's offset]
GetEmitter()->emitIns_R_S(INS_lea, EA_PTRSIZE, REG_ARG_1, 0, 0);
}
// We can use any callee trash register (other than RAX, RDI, RSI) for call target.
// We use R11 here. This will emit either
// "call ip-relative 32-bit offset" or
// "mov r11, helper addr; call r11"
genEmitHelperCall(helper, 0, EA_UNKNOWN, REG_DEFAULT_PROFILER_CALL_TARGET);
#endif // !defined(UNIX_AMD64_ABI)
}
#endif // TARGET_AMD64
#endif // PROFILING_SUPPORTED
#ifdef TARGET_AMD64
//------------------------------------------------------------------------
// genOSRRecordTier0CalleeSavedRegistersAndFrame: for OSR methods, record the
// subset of callee saves already saved by the Tier0 method, and the frame
// created by Tier0.
//
void CodeGen::genOSRRecordTier0CalleeSavedRegistersAndFrame()
{
assert(compiler->compGeneratingProlog);
assert(compiler->opts.IsOSR());
assert(compiler->funCurrentFunc()->funKind == FuncKind::FUNC_ROOT);
#if ETW_EBP_FRAMED
if (!isFramePointerUsed() && regSet.rsRegsModified(RBM_FPBASE))
{
noway_assert(!"Used register RBM_FPBASE as a scratch register!");
}
#endif
// Figure out which set of int callee saves was already saved by Tier0.
// Emit appropriate unwind.
//
PatchpointInfo* const patchpointInfo = compiler->info.compPatchpointInfo;
regMaskTP const tier0CalleeSaves = (regMaskTP)patchpointInfo->CalleeSaveRegisters();
regMaskTP tier0IntCalleeSaves = tier0CalleeSaves & RBM_OSR_INT_CALLEE_SAVED;
int const tier0IntCalleeSaveUsedSize = genCountBits(tier0IntCalleeSaves) * REGSIZE_BYTES;
JITDUMP("--OSR--- tier0 has already saved ");
JITDUMPEXEC(dspRegMask(tier0IntCalleeSaves));
JITDUMP("\n");
// We must account for the Tier0 callee saves.
//
// These have already happened at method entry; all these
// unwind records should be at offset 0.
//
// RBP is always saved by Tier0 and always pushed first.
//
assert((tier0IntCalleeSaves & RBM_FPBASE) == RBM_FPBASE);
compiler->unwindPush(REG_RBP);
tier0IntCalleeSaves &= ~RBM_FPBASE;
// Now the rest of the Tier0 callee saves.
//
for (regNumber reg = REG_INT_LAST; tier0IntCalleeSaves != RBM_NONE; reg = REG_PREV(reg))
{
regMaskTP regBit = genRegMask(reg);
if ((regBit & tier0IntCalleeSaves) != 0)
{
compiler->unwindPush(reg);
}
tier0IntCalleeSaves &= ~regBit;
}
// We must account for the post-callee-saves push SP movement
// done by the Tier0 frame and by the OSR transition.
//
// tier0FrameSize is the Tier0 FP-SP delta plus the fake call slot added by
// JIT_Patchpoint. We add one slot to account for the saved FP.
//
// We then need to subtract off the size the Tier0 callee saves as SP
// adjusts for those will have been modelled by the unwind pushes above.
//
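// For example (illustrative numbers only): with a Tier0 TotalFrameSize of 0x58 and two Tier0
// int callee saves (RBP plus one other, 0x10 bytes), tier0FrameSize is 0x60 and we record an
// additional stack allocation of 0x60 - 0x10 = 0x50 bytes.
//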
int const tier0FrameSize = patchpointInfo->TotalFrameSize() + REGSIZE_BYTES;
int const tier0NetSize = tier0FrameSize - tier0IntCalleeSaveUsedSize;
compiler->unwindAllocStack(tier0NetSize);
}
//------------------------------------------------------------------------
// genOSRSaveRemainingCalleeSavedRegisters: save any callee save registers
// that Tier0 didn't save.
//
// Notes:
// This must be invoked after SP has been adjusted to allocate the local
// frame, because of how the UnwindSave records are interpreted.
//
// We rely on the fact that other "local frame" allocation actions (like
// stack probing) will not trash callee saves registers.
//
void CodeGen::genOSRSaveRemainingCalleeSavedRegisters()
{
// We should be generating the prolog of an OSR root frame.
//
assert(compiler->compGeneratingProlog);
assert(compiler->opts.IsOSR());
assert(compiler->funCurrentFunc()->funKind == FuncKind::FUNC_ROOT);
// x86/x64 doesn't support push of xmm/ymm regs, therefore consider only integer registers for pushing onto stack
// here. Space for float registers to be preserved is stack allocated and saved as part of prolog sequence and not
// here.
regMaskTP rsPushRegs = regSet.rsGetModifiedRegsMask() & RBM_OSR_INT_CALLEE_SAVED;
#if ETW_EBP_FRAMED
if (!isFramePointerUsed() && regSet.rsRegsModified(RBM_FPBASE))
{
noway_assert(!"Used register RBM_FPBASE as a scratch register!");
}
#endif
// Figure out which set of int callee saves still needs saving.
//
PatchpointInfo* const patchpointInfo = compiler->info.compPatchpointInfo;
regMaskTP const tier0CalleeSaves = (regMaskTP)patchpointInfo->CalleeSaveRegisters();
regMaskTP tier0IntCalleeSaves = tier0CalleeSaves & RBM_OSR_INT_CALLEE_SAVED;
unsigned const tier0IntCalleeSaveUsedSize = genCountBits(tier0IntCalleeSaves) * REGSIZE_BYTES;
regMaskTP const osrIntCalleeSaves = rsPushRegs & RBM_OSR_INT_CALLEE_SAVED;
regMaskTP osrAdditionalIntCalleeSaves = osrIntCalleeSaves & ~tier0IntCalleeSaves;
JITDUMP("---OSR--- int callee saves are ");
JITDUMPEXEC(dspRegMask(osrIntCalleeSaves));
JITDUMP("; tier0 already saved ");
JITDUMPEXEC(dspRegMask(tier0IntCalleeSaves));
JITDUMP("; so only saving ");
JITDUMPEXEC(dspRegMask(osrAdditionalIntCalleeSaves));
JITDUMP("\n");
// These remaining callee saves will be stored in the Tier0 callee save area
// below any saves already done by Tier0. Compute the offset.
//
// The OSR method doesn't actually use its callee save area.
//
int const osrFrameSize = compiler->compLclFrameSize;
int const tier0FrameSize = patchpointInfo->TotalFrameSize();
int const osrCalleeSaveSize = compiler->compCalleeRegsPushed * REGSIZE_BYTES;
int const osrFramePointerSize = isFramePointerUsed() ? REGSIZE_BYTES : 0;
int offset = osrFrameSize + osrCalleeSaveSize + osrFramePointerSize + tier0FrameSize - tier0IntCalleeSaveUsedSize;
// The tier0 frame is always an RBP frame, so the OSR method should never need to save RBP.
//
assert((tier0CalleeSaves & RBM_FPBASE) == RBM_FPBASE);
assert((osrAdditionalIntCalleeSaves & RBM_FPBASE) == RBM_NONE);
// The OSR method must use MOVs to save additional callee saves.
//
for (regNumber reg = REG_INT_LAST; osrAdditionalIntCalleeSaves != RBM_NONE; reg = REG_PREV(reg))
{
regMaskTP regBit = genRegMask(reg);
if ((regBit & osrAdditionalIntCalleeSaves) != 0)
{
GetEmitter()->emitIns_AR_R(INS_mov, EA_8BYTE, reg, REG_SPBASE, offset);
compiler->unwindSaveReg(reg, offset);
offset -= REGSIZE_BYTES;
}
osrAdditionalIntCalleeSaves &= ~regBit;
}
}
#endif // TARGET_AMD64
//------------------------------------------------------------------------
// genPushCalleeSavedRegisters: Push any callee-saved registers we have used.
//
void CodeGen::genPushCalleeSavedRegisters()
{
assert(compiler->compGeneratingProlog);
#if DEBUG
// OSR root frames must handle this differently. See
// genOSRRecordTier0CalleeSavedRegisters()
// genOSRSaveRemainingCalleeSavedRegisters()
//
if (compiler->opts.IsOSR())
{
assert(compiler->funCurrentFunc()->funKind != FuncKind::FUNC_ROOT);
}
#endif
// x86/x64 doesn't support push of xmm/ymm regs, therefore consider only integer registers for pushing onto stack
// here. Space for float registers to be preserved is stack allocated and saved as part of prolog sequence and not
// here.
regMaskTP rsPushRegs = regSet.rsGetModifiedRegsMask() & RBM_INT_CALLEE_SAVED;
#if ETW_EBP_FRAMED
if (!isFramePointerUsed() && regSet.rsRegsModified(RBM_FPBASE))
{
noway_assert(!"Used register RBM_FPBASE as a scratch register!");
}
#endif
// On X86/X64 we have already pushed the FP (frame-pointer) prior to calling this method
if (isFramePointerUsed())
{
rsPushRegs &= ~RBM_FPBASE;
}
#ifdef DEBUG
if (compiler->compCalleeRegsPushed != genCountBits(rsPushRegs))
{
printf("Error: unexpected number of callee-saved registers to push. Expected: %d. Got: %d ",
compiler->compCalleeRegsPushed, genCountBits(rsPushRegs));
dspRegMask(rsPushRegs);
printf("\n");
assert(compiler->compCalleeRegsPushed == genCountBits(rsPushRegs));
}
#endif // DEBUG
// Push backwards so we match the order we will pop them in the epilog
// and all the other code that expects it to be in this order.
for (regNumber reg = REG_INT_LAST; rsPushRegs != RBM_NONE; reg = REG_PREV(reg))
{
regMaskTP regBit = genRegMask(reg);
if ((regBit & rsPushRegs) != 0)
{
inst_RV(INS_push, reg, TYP_REF);
compiler->unwindPush(reg);
#ifdef USING_SCOPE_INFO
if (!doubleAlignOrFramePointerUsed())
{
psiAdjustStackLevel(REGSIZE_BYTES);
}
#endif // USING_SCOPE_INFO
rsPushRegs &= ~regBit;
}
}
}
void CodeGen::genPopCalleeSavedRegisters(bool jmpEpilog)
{
assert(compiler->compGeneratingEpilog);
#ifdef TARGET_AMD64
const bool isFunclet = compiler->funCurrentFunc()->funKind != FuncKind::FUNC_ROOT;
const bool doesSupersetOfNormalPops = compiler->opts.IsOSR() && !isFunclet;
// OSR methods must restore all registers saved by either the OSR or
// the Tier0 method. First restore any callee save not saved by
// Tier0, then the callee saves done by Tier0.
//
// OSR funclets do normal restores.
//
if (doesSupersetOfNormalPops)
{
regMaskTP rsPopRegs = regSet.rsGetModifiedRegsMask() & RBM_OSR_INT_CALLEE_SAVED;
regMaskTP tier0CalleeSaves =
((regMaskTP)compiler->info.compPatchpointInfo->CalleeSaveRegisters()) & RBM_OSR_INT_CALLEE_SAVED;
regMaskTP additionalCalleeSaves = rsPopRegs & ~tier0CalleeSaves;
// Registers saved by the OSR prolog.
//
genPopCalleeSavedRegistersFromMask(additionalCalleeSaves);
// Registers saved by the Tier0 prolog.
// Tier0 frame pointer will be restored separately.
//
genPopCalleeSavedRegistersFromMask(tier0CalleeSaves & ~RBM_FPBASE);
return;
}
#endif // TARGET_AMD64
// Registers saved by a normal prolog
//
regMaskTP rsPopRegs = regSet.rsGetModifiedRegsMask() & RBM_INT_CALLEE_SAVED;
const unsigned popCount = genPopCalleeSavedRegistersFromMask(rsPopRegs);
noway_assert(compiler->compCalleeRegsPushed == popCount);
}
//------------------------------------------------------------------------
// genPopCalleeSavedRegistersFromMask: pop specified set of callee saves
// in the "standard" order
//
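// Pops are emitted from the lowest-numbered callee-saved register upward (EBX first, R15 last),
// which is the reverse of the push order used by genPushCalleeSavedRegisters (REG_INT_LAST down).
//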
unsigned CodeGen::genPopCalleeSavedRegistersFromMask(regMaskTP rsPopRegs)
{
unsigned popCount = 0;
if ((rsPopRegs & RBM_EBX) != 0)
{
popCount++;
inst_RV(INS_pop, REG_EBX, TYP_I_IMPL);
}
if ((rsPopRegs & RBM_FPBASE) != 0)
{
// EBP cannot be directly modified for EBP frame and double-aligned frames
assert(!doubleAlignOrFramePointerUsed());
popCount++;
inst_RV(INS_pop, REG_EBP, TYP_I_IMPL);
}
#ifndef UNIX_AMD64_ABI
// For System V AMD64 calling convention ESI and EDI are volatile registers.
if ((rsPopRegs & RBM_ESI) != 0)
{
popCount++;
inst_RV(INS_pop, REG_ESI, TYP_I_IMPL);
}
if ((rsPopRegs & RBM_EDI) != 0)
{
popCount++;
inst_RV(INS_pop, REG_EDI, TYP_I_IMPL);
}
#endif // !defined(UNIX_AMD64_ABI)
#ifdef TARGET_AMD64
if ((rsPopRegs & RBM_R12) != 0)
{
popCount++;
inst_RV(INS_pop, REG_R12, TYP_I_IMPL);
}
if ((rsPopRegs & RBM_R13) != 0)
{
popCount++;
inst_RV(INS_pop, REG_R13, TYP_I_IMPL);
}
if ((rsPopRegs & RBM_R14) != 0)
{
popCount++;
inst_RV(INS_pop, REG_R14, TYP_I_IMPL);
}
if ((rsPopRegs & RBM_R15) != 0)
{
popCount++;
inst_RV(INS_pop, REG_R15, TYP_I_IMPL);
}
#endif // TARGET_AMD64
// Amd64/x86 doesn't support push/pop of xmm registers.
// These will get saved to stack separately after allocating
// space on stack in prolog sequence. PopCount is essentially
// tracking the count of integer registers pushed.
return popCount;
}
/*****************************************************************************
*
* Generates code for a function epilog.
*
* Please consult the "debugger team notification" comment in genFnProlog().
*/
void CodeGen::genFnEpilog(BasicBlock* block)
{
#ifdef DEBUG
if (verbose)
{
printf("*************** In genFnEpilog()\n");
}
#endif
ScopedSetVariable<bool> _setGeneratingEpilog(&compiler->compGeneratingEpilog, true);
VarSetOps::Assign(compiler, gcInfo.gcVarPtrSetCur, GetEmitter()->emitInitGCrefVars);
gcInfo.gcRegGCrefSetCur = GetEmitter()->emitInitGCrefRegs;
gcInfo.gcRegByrefSetCur = GetEmitter()->emitInitByrefRegs;
noway_assert(!compiler->opts.MinOpts() || isFramePointerUsed()); // FPO not allowed with minOpts
#ifdef DEBUG
genInterruptibleUsed = true;
#endif
bool jmpEpilog = ((block->bbFlags & BBF_HAS_JMP) != 0);
#ifdef DEBUG
if (compiler->opts.dspCode)
{
printf("\n__epilog:\n");
}
if (verbose)
{
printf("gcVarPtrSetCur=%s ", VarSetOps::ToString(compiler, gcInfo.gcVarPtrSetCur));
dumpConvertedVarSet(compiler, gcInfo.gcVarPtrSetCur);
printf(", gcRegGCrefSetCur=");
printRegMaskInt(gcInfo.gcRegGCrefSetCur);
GetEmitter()->emitDispRegSet(gcInfo.gcRegGCrefSetCur);
printf(", gcRegByrefSetCur=");
printRegMaskInt(gcInfo.gcRegByrefSetCur);
GetEmitter()->emitDispRegSet(gcInfo.gcRegByrefSetCur);
printf("\n");
}
#endif
// Restore float registers that were saved to stack before SP is modified.
genRestoreCalleeSavedFltRegs(compiler->compLclFrameSize);
#ifdef JIT32_GCENCODER
// When using the JIT32 GC encoder, we do not start the OS-reported portion of the epilog until after
// the above call to `genRestoreCalleeSavedFltRegs` because that function
// a) does not actually restore any registers: there are none when targeting the Windows x86 ABI,
// which is the only target that uses the JIT32 GC encoder
// b) may issue a `vzeroupper` instruction to eliminate AVX -> SSE transition penalties.
// Because the `vzeroupper` instruction is not recognized by the VM's unwinder and there are no
// callee-save FP restores that the unwinder would need to see, we can avoid the need to change the
// unwinder (and break binary compat with older versions of the runtime) by starting the epilog
// after any `vzeroupper` instruction has been emitted. If either of the above conditions changes,
// we will need to rethink this.
GetEmitter()->emitStartEpilog();
#endif
/* Compute the size in bytes we've pushed/popped */
bool removeEbpFrame = doubleAlignOrFramePointerUsed();
#ifdef TARGET_AMD64
// We only remove the EBP frame using the frame pointer (using `lea rsp, [rbp + const]`)
// if we reported the frame pointer in the prolog. The Windows x64 unwinding ABI specifically
// disallows this `lea` form:
//
// See https://docs.microsoft.com/en-us/cpp/build/prolog-and-epilog?view=msvc-160#epilog-code
//
// "When a frame pointer is not used, the epilog must use add RSP,constant to deallocate the fixed part of the
// stack. It may not use lea RSP,constant[RSP] instead. This restriction exists so the unwind code has fewer
// patterns to recognize when searching for epilogs."
//
// Otherwise, we must use `add RSP, constant`, as stated. So, we need to use the same condition
// as genFnProlog() used in determining whether to report the frame pointer in the unwind data.
// This is a subset of the `doubleAlignOrFramePointerUsed()` cases.
//
if (removeEbpFrame)
{
const bool reportUnwindData = compiler->compLocallocUsed || compiler->opts.compDbgEnC;
removeEbpFrame = removeEbpFrame && reportUnwindData;
}
#endif // TARGET_AMD64
if (!removeEbpFrame)
{
// We have an ESP frame
noway_assert(compiler->compLocallocUsed == false); // Only used with frame-pointer
/* Get rid of our local variables */
unsigned int frameSize = compiler->compLclFrameSize;
#ifdef TARGET_AMD64
// OSR must remove the entire OSR frame and the Tier0 frame down to the bottom
// of the used part of the Tier0 callee save area.
//
if (compiler->opts.IsOSR())
{
// The patchpoint TotalFrameSize is SP-FP delta (plus "call" slot added by JIT_Patchpoint)
// so does not account for the Tier0 push of FP, so we add in an extra stack slot to get the
// offset to the top of the Tier0 callee saves area.
//
PatchpointInfo* const patchpointInfo = compiler->info.compPatchpointInfo;
regMaskTP const tier0CalleeSaves = (regMaskTP)patchpointInfo->CalleeSaveRegisters();
regMaskTP const tier0IntCalleeSaves = tier0CalleeSaves & RBM_OSR_INT_CALLEE_SAVED;
regMaskTP const osrIntCalleeSaves = regSet.rsGetModifiedRegsMask() & RBM_OSR_INT_CALLEE_SAVED;
regMaskTP const allIntCalleeSaves = osrIntCalleeSaves | tier0IntCalleeSaves;
unsigned const tier0FrameSize = patchpointInfo->TotalFrameSize() + REGSIZE_BYTES;
unsigned const tier0IntCalleeSaveUsedSize = genCountBits(allIntCalleeSaves) * REGSIZE_BYTES;
unsigned const osrCalleeSaveSize = compiler->compCalleeRegsPushed * REGSIZE_BYTES;
unsigned const osrFramePointerSize = isFramePointerUsed() ? REGSIZE_BYTES : 0;
unsigned const osrAdjust =
tier0FrameSize - tier0IntCalleeSaveUsedSize + osrCalleeSaveSize + osrFramePointerSize;
JITDUMP("OSR epilog adjust factors: tier0 frame %u, tier0 callee saves -%u, osr callee saves %u, osr "
"framePointer %u\n",
tier0FrameSize, tier0IntCalleeSaveUsedSize, osrCalleeSaveSize, osrFramePointerSize);
JITDUMP(" OSR frame size %u; net osr adjust %u, result %u\n", frameSize, osrAdjust,
frameSize + osrAdjust);
frameSize += osrAdjust;
}
#endif // TARGET_AMD64
if (frameSize > 0)
{
#ifdef TARGET_X86
/* Add 'compiler->compLclFrameSize' to ESP */
/* Use pop ECX to increment ESP by 4, unless compiler->compJmpOpUsed is true */
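/* ("pop ecx" is a 1-byte encoding, vs. 3 bytes for "add esp, 4") */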
if ((frameSize == TARGET_POINTER_SIZE) && !compiler->compJmpOpUsed)
{
inst_RV(INS_pop, REG_ECX, TYP_I_IMPL);
regSet.verifyRegUsed(REG_ECX);
}
else
#endif // TARGET_X86
{
/* Add 'compiler->compLclFrameSize' to ESP */
/* Generate "add esp, <stack-size>" */
inst_RV_IV(INS_add, REG_SPBASE, frameSize, EA_PTRSIZE);
}
}
genPopCalleeSavedRegisters();
#ifdef TARGET_AMD64
// In the case where we have an RSP frame, and no frame pointer reported in the OS unwind info,
// but we do have a pushed frame pointer and established frame chain, we do need to pop RBP.
//
// OSR methods must always pop RBP (pushed by Tier0 frame)
if (doubleAlignOrFramePointerUsed() || compiler->opts.IsOSR())
{
inst_RV(INS_pop, REG_EBP, TYP_I_IMPL);
}
#endif // TARGET_AMD64
}
else
{
noway_assert(doubleAlignOrFramePointerUsed());
// We don't support OSR for methods that must report an FP in unwind.
//
assert(!compiler->opts.IsOSR());
/* Tear down the stack frame */
bool needMovEspEbp = false;
#if DOUBLE_ALIGN
if (compiler->genDoubleAlign())
{
//
// add esp, compLclFrameSize
//
// We need not do anything (except the "mov esp, ebp") if
// compiler->compCalleeRegsPushed==0. However, this is unlikely, and it
// also complicates the code manager. Hence, we ignore that case.
noway_assert(compiler->compLclFrameSize != 0);
inst_RV_IV(INS_add, REG_SPBASE, compiler->compLclFrameSize, EA_PTRSIZE);
needMovEspEbp = true;
}
else
#endif // DOUBLE_ALIGN
{
bool needLea = false;
if (compiler->compLocallocUsed)
{
// OSR not yet ready for localloc
assert(!compiler->opts.IsOSR());
// ESP may be variable if a localloc was actually executed. Reset it.
// lea esp, [ebp - compiler->compCalleeRegsPushed * REGSIZE_BYTES]
needLea = true;
}
else if (!regSet.rsRegsModified(RBM_CALLEE_SAVED))
{
if (compiler->compLclFrameSize != 0)
{
#ifdef TARGET_AMD64
// AMD64 can't use "mov esp, ebp", according to the ABI specification describing epilogs. So,
// do an LEA to "pop off" the frame allocation.
needLea = true;
#else // !TARGET_AMD64
// We will just generate "mov esp, ebp" and be done with it.
needMovEspEbp = true;
#endif // !TARGET_AMD64
}
}
else if (compiler->compLclFrameSize == 0)
{
// do nothing before popping the callee-saved registers
}
#ifdef TARGET_X86
else if (compiler->compLclFrameSize == REGSIZE_BYTES)
{
// "pop ecx" will make ESP point to the callee-saved registers
inst_RV(INS_pop, REG_ECX, TYP_I_IMPL);
regSet.verifyRegUsed(REG_ECX);
}
#endif // TARGET_X86
else
{
// We need to make ESP point to the callee-saved registers
needLea = true;
}
if (needLea)
{
int offset;
#ifdef TARGET_AMD64
// lea esp, [ebp + compiler->compLclFrameSize - genSPtoFPdelta]
//
// Case 1: localloc not used.
// genSPToFPDelta = compiler->compCalleeRegsPushed * REGSIZE_BYTES + compiler->compLclFrameSize
// offset = compiler->compCalleeRegsPushed * REGSIZE_BYTES;
// The amount to be subtracted from RBP to point at callee saved int regs.
//
// Case 2: localloc used
// genSPToFPDelta = Min(240, (int)compiler->lvaOutgoingArgSpaceSize)
// Offset = Amount to be added to RBP to point at callee saved int regs.
offset = genSPtoFPdelta() - compiler->compLclFrameSize;
// Offset should fit within a byte if localloc is not used.
if (!compiler->compLocallocUsed)
{
noway_assert(offset < UCHAR_MAX);
}
#else
// lea esp, [ebp - compiler->compCalleeRegsPushed * REGSIZE_BYTES]
offset = compiler->compCalleeRegsPushed * REGSIZE_BYTES;
noway_assert(offset < UCHAR_MAX); // the offset fits in a byte
#endif
GetEmitter()->emitIns_R_AR(INS_lea, EA_PTRSIZE, REG_SPBASE, REG_FPBASE, -offset);
}
}
//
// Pop the callee-saved registers (if any)
//
genPopCalleeSavedRegisters();
#ifdef TARGET_AMD64
// Extra OSR adjust to get to where RBP was saved by the tier0 frame.
//
// Note the other callee saves made in that frame are dead, the current method
// will save and restore what it needs.
if (compiler->opts.IsOSR())
{
PatchpointInfo* const patchpointInfo = compiler->info.compPatchpointInfo;
const int tier0FrameSize = patchpointInfo->TotalFrameSize();
// Use add since we know the SP-to-FP delta of the original method.
// We also need to skip over the slot where we pushed RBP.
//
// If we ever allow the original method to have localloc this will
// need to change.
inst_RV_IV(INS_add, REG_SPBASE, tier0FrameSize + TARGET_POINTER_SIZE, EA_PTRSIZE);
}
assert(!needMovEspEbp); // "mov esp, ebp" is not allowed in AMD64 epilogs
#else // !TARGET_AMD64
if (needMovEspEbp)
{
// mov esp, ebp
inst_Mov(TYP_I_IMPL, REG_SPBASE, REG_FPBASE, /* canSkip */ false);
}
#endif // !TARGET_AMD64
// pop ebp
inst_RV(INS_pop, REG_EBP, TYP_I_IMPL);
}
GetEmitter()->emitStartExitSeq(); // Mark the start of the "return" sequence
/* Check if this a special return block i.e.
* CEE_JMP instruction */
if (jmpEpilog)
{
noway_assert(block->bbJumpKind == BBJ_RETURN);
noway_assert(block->GetFirstLIRNode());
// figure out what jump we have
GenTree* jmpNode = block->lastNode();
#if !FEATURE_FASTTAILCALL
// x86
noway_assert(jmpNode->gtOper == GT_JMP);
#else
// amd64
// If jmpNode is GT_JMP then gtNext must be null.
// If jmpNode is a fast tail call, gtNext need not be null since it could have embedded stmts.
noway_assert((jmpNode->gtOper != GT_JMP) || (jmpNode->gtNext == nullptr));
// Could either be a "jmp method" or "fast tail call" implemented as epilog+jmp
noway_assert((jmpNode->gtOper == GT_JMP) ||
((jmpNode->gtOper == GT_CALL) && jmpNode->AsCall()->IsFastTailCall()));
// The next block is associated with this "if" stmt
if (jmpNode->gtOper == GT_JMP)
#endif
{
// Simply emit a jump to the methodHnd. This is similar to a call so we can use
// the same descriptor with some minor adjustments.
CORINFO_METHOD_HANDLE methHnd = (CORINFO_METHOD_HANDLE)jmpNode->AsVal()->gtVal1;
CORINFO_CONST_LOOKUP addrInfo;
compiler->info.compCompHnd->getFunctionEntryPoint(methHnd, &addrInfo);
if (addrInfo.accessType != IAT_VALUE && addrInfo.accessType != IAT_PVALUE)
{
NO_WAY("Unsupported JMP indirection");
}
// If we have IAT_PVALUE we might need to jump via register indirect, as sometimes the
// indirection cell can't be reached by the jump.
emitter::EmitCallType callType;
void* addr;
regNumber indCallReg;
if (addrInfo.accessType == IAT_PVALUE)
{
if (genCodeIndirAddrCanBeEncodedAsPCRelOffset((size_t)addrInfo.addr))
{
// 32 bit displacement will work
callType = emitter::EC_FUNC_TOKEN_INDIR;
addr = addrInfo.addr;
indCallReg = REG_NA;
}
else
{
// 32 bit displacement won't work
callType = emitter::EC_INDIR_ARD;
indCallReg = REG_RAX;
addr = nullptr;
instGen_Set_Reg_To_Imm(EA_HANDLE_CNS_RELOC, indCallReg, (ssize_t)addrInfo.addr);
regSet.verifyRegUsed(indCallReg);
}
}
else
{
callType = emitter::EC_FUNC_TOKEN;
addr = addrInfo.addr;
indCallReg = REG_NA;
}
// clang-format off
GetEmitter()->emitIns_Call(callType,
methHnd,
INDEBUG_LDISASM_COMMA(nullptr)
addr,
0, // argSize
EA_UNKNOWN // retSize
MULTIREG_HAS_SECOND_GC_RET_ONLY_ARG(EA_UNKNOWN), // secondRetSize
gcInfo.gcVarPtrSetCur,
gcInfo.gcRegGCrefSetCur,
gcInfo.gcRegByrefSetCur,
DebugInfo(),
indCallReg, REG_NA, 0, 0, /* ireg, xreg, xmul, disp */
true /* isJump */
);
// clang-format on
}
#if FEATURE_FASTTAILCALL
else
{
genCallInstruction(jmpNode->AsCall());
}
#endif // FEATURE_FASTTAILCALL
}
else
{
unsigned stkArgSize = 0; // Zero on all platforms except x86
#if defined(TARGET_X86)
bool fCalleePop = true;
// varargs has caller pop
if (compiler->info.compIsVarArgs)
fCalleePop = false;
if (IsCallerPop(compiler->info.compCallConv))
fCalleePop = false;
if (fCalleePop)
{
noway_assert(compiler->compArgSize >= intRegState.rsCalleeRegArgCount * REGSIZE_BYTES);
stkArgSize = compiler->compArgSize - intRegState.rsCalleeRegArgCount * REGSIZE_BYTES;
noway_assert(compiler->compArgSize < 0x10000); // "ret" only has 2 byte operand
}
#ifdef UNIX_X86_ABI
// The called function must remove hidden address argument from the stack before returning
// in case of struct returning according to cdecl calling convention on linux.
// Details: http://www.sco.com/developers/devspecs/abi386-4.pdf pages 40-43
if (compiler->info.compCallConv == CorInfoCallConvExtension::C && compiler->info.compRetBuffArg != BAD_VAR_NUM)
stkArgSize += TARGET_POINTER_SIZE;
#endif // UNIX_X86_ABI
#endif // TARGET_X86
/* Return, popping our arguments (if any) */
instGen_Return(stkArgSize);
}
}
#if defined(FEATURE_EH_FUNCLETS)
#if defined(TARGET_AMD64)
/*****************************************************************************
*
* Generates code for an EH funclet prolog.
*
* Funclets have the following incoming arguments:
*
* catch/filter-handler: rcx = InitialSP, rdx = the exception object that was caught (see GT_CATCH_ARG)
* filter: rcx = InitialSP, rdx = the exception object to filter (see GT_CATCH_ARG)
* finally/fault: rcx = InitialSP
*
* Funclets set the following registers on exit:
*
* catch/filter-handler: rax = the address at which execution should resume (see BBJ_EHCATCHRET)
* filter: rax = non-zero if the handler should handle the exception, zero otherwise (see GT_RETFILT)
* finally/fault: none
*
* The AMD64 funclet prolog sequence is:
*
* push ebp
* push callee-saved regs
* ; TODO-AMD64-CQ: We probably only need to save any callee-save registers that we actually use
* ; in the funclet. Currently, we save the same set of callee-saved regs calculated for
* ; the entire function.
* sub sp, XXX ; Establish the rest of the frame.
* ; XXX is determined by lvaOutgoingArgSpaceSize plus space for the PSP slot, aligned
* ; up to preserve stack alignment. If we push an odd number of registers, we also
* ; generate this, to keep the stack aligned.
*
* ; Fill the PSP slot, for use by the VM (it gets reported with the GC info), or by code generation of nested
* ; filters.
* ; This is not part of the "OS prolog"; it has no associated unwind data, and is not reversed in the funclet
* ; epilog.
* ; Also, re-establish the frame pointer from the PSP.
*
* mov rbp, [rcx + PSP_slot_InitialSP_offset] ; Load the PSP (InitialSP of the main function stored in the
* ; PSP of the dynamically containing funclet or function)
* mov [rsp + PSP_slot_InitialSP_offset], rbp ; store the PSP in our frame
* lea ebp, [rbp + Function_InitialSP_to_FP_delta] ; re-establish the frame pointer of the parent frame. If
* ; Function_InitialSP_to_FP_delta==0, we don't need this
* ; instruction.
*
* The epilog sequence is then:
*
* add rsp, XXX
* pop callee-saved regs ; if necessary
* pop rbp
* ret
*
* The funclet frame is thus:
*
* | |
* |-----------------------|
* | incoming |
* | arguments |
* +=======================+ <---- Caller's SP
* | Return address |
* |-----------------------|
* | Saved EBP |
* |-----------------------|
* |Callee saved registers |
* |-----------------------|
* ~ possible 8 byte pad ~
* ~ for alignment ~
* |-----------------------|
* | PSP slot | // Omitted in CoreRT ABI
* |-----------------------|
* | Outgoing arg space | // this only exists if the function makes a call
* |-----------------------| <---- Initial SP
* | | |
* ~ | Stack grows ~
* | | downward |
* V
*
* TODO-AMD64-Bug?: the frame pointer should really point to the PSP slot (the debugger seems to assume this
* in DacDbiInterfaceImpl::InitParentFrameInfo()), or someplace above Initial-SP. There is an AMD64
* UNWIND_INFO restriction that it must be within 240 bytes of Initial-SP. See jit64\amd64\inc\md.h
* "FRAMEPTR OFFSETS" for details.
*/
void CodeGen::genFuncletProlog(BasicBlock* block)
{
#ifdef DEBUG
if (verbose)
{
printf("*************** In genFuncletProlog()\n");
}
#endif
assert(!regSet.rsRegsModified(RBM_FPBASE));
assert(block != nullptr);
assert(block->bbFlags & BBF_FUNCLET_BEG);
assert(isFramePointerUsed());
ScopedSetVariable<bool> _setGeneratingProlog(&compiler->compGeneratingProlog, true);
gcInfo.gcResetForBB();
compiler->unwindBegProlog();
// We need to push ebp, since it's callee-saved.
// We need to push the callee-saved registers. We only need to push the ones that we need, but we don't
// keep track of that on a per-funclet basis, so we push the same set as in the main function.
// The only fixed-size frame we need to allocate is whatever is big enough for the PSPSym, since nothing else
// is stored here (all temps are allocated in the parent frame).
// We do need to allocate the outgoing argument space, in case there are calls here. This must be the same
// size as the parent frame's outgoing argument space, to keep the PSPSym offset the same.
inst_RV(INS_push, REG_FPBASE, TYP_REF);
compiler->unwindPush(REG_FPBASE);
// Callee saved int registers are pushed to stack.
genPushCalleeSavedRegisters();
regMaskTP maskArgRegsLiveIn;
if ((block->bbCatchTyp == BBCT_FINALLY) || (block->bbCatchTyp == BBCT_FAULT))
{
maskArgRegsLiveIn = RBM_ARG_0;
}
else
{
maskArgRegsLiveIn = RBM_ARG_0 | RBM_ARG_2;
}
regNumber initReg = REG_EBP; // We already saved EBP, so it can be trashed
bool initRegZeroed = false;
genAllocLclFrame(genFuncletInfo.fiSpDelta, initReg, &initRegZeroed, maskArgRegsLiveIn);
// Callee saved float registers are copied to stack in their assigned stack slots
// after allocating space for them as part of funclet frame.
genPreserveCalleeSavedFltRegs(genFuncletInfo.fiSpDelta);
// This is the end of the OS-reported prolog for purposes of unwinding
compiler->unwindEndProlog();
// If there is no PSPSym (CoreRT ABI), we are done.
if (compiler->lvaPSPSym == BAD_VAR_NUM)
{
return;
}
GetEmitter()->emitIns_R_AR(INS_mov, EA_PTRSIZE, REG_FPBASE, REG_ARG_0, genFuncletInfo.fiPSP_slot_InitialSP_offset);
regSet.verifyRegUsed(REG_FPBASE);
GetEmitter()->emitIns_AR_R(INS_mov, EA_PTRSIZE, REG_FPBASE, REG_SPBASE, genFuncletInfo.fiPSP_slot_InitialSP_offset);
if (genFuncletInfo.fiFunction_InitialSP_to_FP_delta != 0)
{
GetEmitter()->emitIns_R_AR(INS_lea, EA_PTRSIZE, REG_FPBASE, REG_FPBASE,
genFuncletInfo.fiFunction_InitialSP_to_FP_delta);
}
// We've modified EBP, but not really. Say that we haven't...
regSet.rsRemoveRegsModified(RBM_FPBASE);
}
/*****************************************************************************
*
* Generates code for an EH funclet epilog.
*
* Note that we don't do anything with unwind codes, because AMD64 only cares about unwind codes for the prolog.
*/
void CodeGen::genFuncletEpilog()
{
#ifdef DEBUG
if (verbose)
{
printf("*************** In genFuncletEpilog()\n");
}
#endif
ScopedSetVariable<bool> _setGeneratingEpilog(&compiler->compGeneratingEpilog, true);
// Restore callee saved XMM regs from their stack slots before modifying SP
// to position at callee saved int regs.
genRestoreCalleeSavedFltRegs(genFuncletInfo.fiSpDelta);
inst_RV_IV(INS_add, REG_SPBASE, genFuncletInfo.fiSpDelta, EA_PTRSIZE);
genPopCalleeSavedRegisters();
inst_RV(INS_pop, REG_EBP, TYP_I_IMPL);
instGen_Return(0);
}
/*****************************************************************************
*
* Capture the information used to generate the funclet prologs and epilogs.
*/
void CodeGen::genCaptureFuncletPrologEpilogInfo()
{
if (!compiler->ehAnyFunclets())
{
return;
}
// Note that compLclFrameSize can't be used (nor can we call functions that depend on it),
// because we're not going to allocate the same size frame as the parent.
assert(isFramePointerUsed());
assert(compiler->lvaDoneFrameLayout == Compiler::FINAL_FRAME_LAYOUT); // The frame size and offsets must be
// finalized
assert(compiler->compCalleeFPRegsSavedMask != (regMaskTP)-1); // The float registers to be preserved are finalized
// Even though lvaToInitialSPRelativeOffset() depends on compLclFrameSize,
// that's ok, because we're figuring out an offset in the parent frame.
genFuncletInfo.fiFunction_InitialSP_to_FP_delta =
compiler->lvaToInitialSPRelativeOffset(0, true); // trick to find the Initial-SP-relative offset of the frame
// pointer.
assert(compiler->lvaOutgoingArgSpaceSize % REGSIZE_BYTES == 0);
#ifndef UNIX_AMD64_ABI
// No 4 slots for outgoing params on the stack for System V systems.
assert((compiler->lvaOutgoingArgSpaceSize == 0) ||
(compiler->lvaOutgoingArgSpaceSize >= (4 * REGSIZE_BYTES))); // On AMD64, we always have 4 outgoing argument
// slots if there are any calls in the function.
#endif // UNIX_AMD64_ABI
unsigned offset = compiler->lvaOutgoingArgSpaceSize;
genFuncletInfo.fiPSP_slot_InitialSP_offset = offset;
// How much stack do we allocate in the funclet?
// We need to 16-byte align the stack.
unsigned totalFrameSize =
REGSIZE_BYTES // return address
+ REGSIZE_BYTES // pushed EBP
+ (compiler->compCalleeRegsPushed * REGSIZE_BYTES); // pushed callee-saved int regs, not including EBP
// The entire 128 bits of each XMM register are saved to the stack due to ABI encoding requirements.
// Copying an entire XMM register to/from memory performs best when SP is aligned on an XMM_REGSIZE_BYTES boundary.
unsigned calleeFPRegsSavedSize = genCountBits(compiler->compCalleeFPRegsSavedMask) * XMM_REGSIZE_BYTES;
unsigned FPRegsPad = (calleeFPRegsSavedSize > 0) ? AlignmentPad(totalFrameSize, XMM_REGSIZE_BYTES) : 0;
unsigned PSPSymSize = (compiler->lvaPSPSym != BAD_VAR_NUM) ? REGSIZE_BYTES : 0;
totalFrameSize += FPRegsPad // Padding before pushing entire xmm regs
+ calleeFPRegsSavedSize // pushed callee-saved float regs
// below calculated 'pad' will go here
+ PSPSymSize // PSPSym
+ compiler->lvaOutgoingArgSpaceSize // outgoing arg space
;
unsigned pad = AlignmentPad(totalFrameSize, 16);
genFuncletInfo.fiSpDelta = FPRegsPad // Padding to align SP on XMM_REGSIZE_BYTES boundary
+ calleeFPRegsSavedSize // Callee saved xmm regs
+ pad + PSPSymSize // PSPSym
+ compiler->lvaOutgoingArgSpaceSize // outgoing arg space
;
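// Illustrative example only (hypothetical values): with 2 callee-saved int regs pushed besides RBP,
// 2 XMM regs to preserve, a PSPSym, and 32 bytes of outgoing arg space:
//   totalFrameSize = 8 (return addr) + 8 (RBP) + 16 (int regs) = 32, then
//                  + 0 (FPRegsPad) + 32 (XMM) + 8 (PSPSym) + 32 (outgoing) = 104
//   pad            = 8 (rounds the whole frame up to 112, a multiple of 16)
//   fiSpDelta      = 0 + 32 + 8 + 8 + 32 = 80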
#ifdef DEBUG
if (verbose)
{
printf("\n");
printf("Funclet prolog / epilog info\n");
printf(" Function InitialSP-to-FP delta: %d\n", genFuncletInfo.fiFunction_InitialSP_to_FP_delta);
printf(" SP delta: %d\n", genFuncletInfo.fiSpDelta);
printf(" PSP slot Initial SP offset: %d\n", genFuncletInfo.fiPSP_slot_InitialSP_offset);
}
if (compiler->lvaPSPSym != BAD_VAR_NUM)
{
assert(genFuncletInfo.fiPSP_slot_InitialSP_offset ==
compiler->lvaGetInitialSPRelativeOffset(compiler->lvaPSPSym)); // same offset used in main function and
// funclet!
}
#endif // DEBUG
}
#elif defined(TARGET_X86)
/*****************************************************************************
*
* Generates code for an EH funclet prolog.
*
*
* Funclets have the following incoming arguments:
*
* catch/filter-handler: eax = the exception object that was caught (see GT_CATCH_ARG)
* filter: eax = the exception object that was caught (see GT_CATCH_ARG)
* finally/fault: none
*
* Funclets set the following registers on exit:
*
* catch/filter-handler: eax = the address at which execution should resume (see BBJ_EHCATCHRET)
* filter: eax = non-zero if the handler should handle the exception, zero otherwise (see GT_RETFILT)
* finally/fault: none
*
* Funclet prolog/epilog sequence and funclet frame layout are TBD.
*
*/
void CodeGen::genFuncletProlog(BasicBlock* block)
{
#ifdef DEBUG
if (verbose)
{
printf("*************** In genFuncletProlog()\n");
}
#endif
ScopedSetVariable<bool> _setGeneratingProlog(&compiler->compGeneratingProlog, true);
gcInfo.gcResetForBB();
compiler->unwindBegProlog();
// This is the end of the OS-reported prolog for purposes of unwinding
compiler->unwindEndProlog();
// TODO We may need EBP restore sequence here if we introduce PSPSym
// Add a padding for 16-byte alignment
inst_RV_IV(INS_sub, REG_SPBASE, 12, EA_PTRSIZE);
}
/*****************************************************************************
*
* Generates code for an EH funclet epilog.
*/
void CodeGen::genFuncletEpilog()
{
#ifdef DEBUG
if (verbose)
{
printf("*************** In genFuncletEpilog()\n");
}
#endif
ScopedSetVariable<bool> _setGeneratingEpilog(&compiler->compGeneratingEpilog, true);
// Remove the padding that was added for 16-byte alignment
inst_RV_IV(INS_add, REG_SPBASE, 12, EA_PTRSIZE);
instGen_Return(0);
}
/*****************************************************************************
*
* Capture the information used to generate the funclet prologs and epilogs.
*/
void CodeGen::genCaptureFuncletPrologEpilogInfo()
{
if (!compiler->ehAnyFunclets())
{
return;
}
}
#endif // TARGET_X86
void CodeGen::genSetPSPSym(regNumber initReg, bool* pInitRegZeroed)
{
assert(compiler->compGeneratingProlog);
if (compiler->lvaPSPSym == BAD_VAR_NUM)
{
return;
}
noway_assert(isFramePointerUsed()); // We need an explicit frame pointer
#if defined(TARGET_AMD64)
// The PSP sym value is Initial-SP, not Caller-SP!
// We assume that RSP is Initial-SP when this function is called. That is, the stack frame
// has been established.
//
// We generate:
// mov [rbp-20h], rsp // store the Initial-SP (our current rsp) in the PSPsym
GetEmitter()->emitIns_S_R(ins_Store(TYP_I_IMPL), EA_PTRSIZE, REG_SPBASE, compiler->lvaPSPSym, 0);
#else // TARGET*
NYI("Set function PSP sym");
#endif // TARGET*
}
#endif // FEATURE_EH_FUNCLETS
//-----------------------------------------------------------------------------
// genZeroInitFrameUsingBlockInit: architecture-specific helper for genZeroInitFrame in the case
// `genUseBlockInit` is set.
//
// Arguments:
// untrLclHi - (Untracked locals High-Offset) The upper bound offset at which the zero init
// code will end initializing memory (not inclusive).
// untrLclLo - (Untracked locals Low-Offset) The lower bound at which the zero init code will
// start zero initializing memory.
// initReg - A scratch register (that gets set to zero on some platforms).
// pInitRegZeroed - OUT parameter. *pInitRegZeroed is set to 'true' if this method sets initReg register to zero,
// 'false' if initReg was set to a non-zero value, and left unchanged if initReg was not touched.
//
void CodeGen::genZeroInitFrameUsingBlockInit(int untrLclHi, int untrLclLo, regNumber initReg, bool* pInitRegZeroed)
{
assert(compiler->compGeneratingProlog);
assert(genUseBlockInit);
assert(untrLclHi > untrLclLo);
assert(compiler->getSIMDSupportLevel() >= SIMD_SSE2_Supported);
emitter* emit = GetEmitter();
regNumber frameReg = genFramePointerReg();
regNumber zeroReg = REG_NA;
int blkSize = untrLclHi - untrLclLo;
int minSimdSize = XMM_REGSIZE_BYTES;
assert(blkSize >= 0);
noway_assert((blkSize % sizeof(int)) == 0);
// initReg is not a live incoming argument reg
assert((genRegMask(initReg) & intRegState.rsCalleeRegArgMaskLiveIn) == 0);
#if defined(TARGET_AMD64)
// We will align on x64 so we can use the aligned mov
instruction simdMov = simdAlignedMovIns();
// Aligning low we want to move up to next boundary
int alignedLclLo = (untrLclLo + (XMM_REGSIZE_BYTES - 1)) & -XMM_REGSIZE_BYTES;
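// e.g. (illustrative values only, assuming XMM_REGSIZE_BYTES == 16): untrLclLo == -0x28 yields
// alignedLclLo == -0x20, the next 16-byte boundary toward higher addresses.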
if ((untrLclLo != alignedLclLo) && (blkSize < 2 * XMM_REGSIZE_BYTES))
{
// If unaligned and smaller than 2 x SIMD size we won't bother trying to align
assert((alignedLclLo - untrLclLo) < XMM_REGSIZE_BYTES);
simdMov = simdUnalignedMovIns();
}
#else // !defined(TARGET_AMD64)
// We aren't going to try and align on x86
instruction simdMov = simdUnalignedMovIns();
int alignedLclLo = untrLclLo;
#endif // !defined(TARGET_AMD64)
if (blkSize < minSimdSize)
{
zeroReg = genGetZeroReg(initReg, pInitRegZeroed);
int i = 0;
for (; i + REGSIZE_BYTES <= blkSize; i += REGSIZE_BYTES)
{
emit->emitIns_AR_R(ins_Store(TYP_I_IMPL), EA_PTRSIZE, zeroReg, frameReg, untrLclLo + i);
}
#if defined(TARGET_AMD64)
assert((i == blkSize) || (i + (int)sizeof(int) == blkSize));
if (i != blkSize)
{
emit->emitIns_AR_R(ins_Store(TYP_INT), EA_4BYTE, zeroReg, frameReg, untrLclLo + i);
i += sizeof(int);
}
#endif // defined(TARGET_AMD64)
assert(i == blkSize);
}
else
{
// Grab a non-argument, non-callee saved XMM reg
CLANG_FORMAT_COMMENT_ANCHOR;
#ifdef UNIX_AMD64_ABI
// System V x64 first temp reg is xmm8
regNumber zeroSIMDReg = genRegNumFromMask(RBM_XMM8);
#else
// Windows first temp reg is xmm4
regNumber zeroSIMDReg = genRegNumFromMask(RBM_XMM4);
#endif // UNIX_AMD64_ABI
#if defined(TARGET_AMD64)
int alignedLclHi;
int alignmentHiBlkSize;
if ((blkSize < 2 * XMM_REGSIZE_BYTES) || (untrLclLo == alignedLclLo))
{
// Either aligned or smaller than 2 x SIMD size, so we won't try to align
// However, we still want to zero anything that is not in a 16-byte chunk at the end
int alignmentBlkSize = blkSize & -XMM_REGSIZE_BYTES;
alignmentHiBlkSize = blkSize - alignmentBlkSize;
alignedLclHi = untrLclLo + alignmentBlkSize;
alignedLclLo = untrLclLo;
blkSize = alignmentBlkSize;
assert((blkSize + alignmentHiBlkSize) == (untrLclHi - untrLclLo));
}
else
{
// We are going to align
// Aligning high we want to move down to previous boundary
alignedLclHi = untrLclHi & -XMM_REGSIZE_BYTES;
// Zero out the unaligned portions
alignmentHiBlkSize = untrLclHi - alignedLclHi;
int alignmentLoBlkSize = alignedLclLo - untrLclLo;
blkSize = alignedLclHi - alignedLclLo;
assert((blkSize + alignmentLoBlkSize + alignmentHiBlkSize) == (untrLclHi - untrLclLo));
assert(alignmentLoBlkSize > 0);
assert(alignmentLoBlkSize < XMM_REGSIZE_BYTES);
assert((alignedLclLo - alignmentLoBlkSize) == untrLclLo);
zeroReg = genGetZeroReg(initReg, pInitRegZeroed);
int i = 0;
for (; i + REGSIZE_BYTES <= alignmentLoBlkSize; i += REGSIZE_BYTES)
{
emit->emitIns_AR_R(ins_Store(TYP_I_IMPL), EA_PTRSIZE, zeroReg, frameReg, untrLclLo + i);
}
assert((i == alignmentLoBlkSize) || (i + (int)sizeof(int) == alignmentLoBlkSize));
if (i != alignmentLoBlkSize)
{
emit->emitIns_AR_R(ins_Store(TYP_INT), EA_4BYTE, zeroReg, frameReg, untrLclLo + i);
i += sizeof(int);
}
assert(i == alignmentLoBlkSize);
}
#else // !defined(TARGET_AMD64)
// While we aren't aligning the start, we still want to
// zero anything that is not in a 16-byte chunk at the end
int alignmentBlkSize = blkSize & -XMM_REGSIZE_BYTES;
int alignmentHiBlkSize = blkSize - alignmentBlkSize;
int alignedLclHi = untrLclLo + alignmentBlkSize;
blkSize = alignmentBlkSize;
assert((blkSize + alignmentHiBlkSize) == (untrLclHi - untrLclLo));
#endif // !defined(TARGET_AMD64)
// The loop is unrolled 3 times, so we only use the loop form when it will iterate at least once;
// hence the threshold is 6 SIMD-register-sized chunks.
if (blkSize < (6 * XMM_REGSIZE_BYTES))
{
// Generate the following code:
//
// xorps xmm4, xmm4
// movups xmmword ptr [ebp/esp-OFFS], xmm4
// ...
// movups xmmword ptr [ebp/esp-OFFS], xmm4
// mov qword ptr [ebp/esp-OFFS], rax
emit->emitIns_R_R(INS_xorps, EA_ATTR(XMM_REGSIZE_BYTES), zeroSIMDReg, zeroSIMDReg);
int i = 0;
for (; i < blkSize; i += XMM_REGSIZE_BYTES)
{
emit->emitIns_AR_R(simdMov, EA_ATTR(XMM_REGSIZE_BYTES), zeroSIMDReg, frameReg, alignedLclLo + i);
}
assert(i == blkSize);
}
else
{
// Generate the following code:
//
// xorps xmm4, xmm4
// ;movaps xmmword ptr[ebp/esp-loOFFS], xmm4 ; alignment to 3x
// ;movaps xmmword ptr[ebp/esp-loOFFS + 10H], xmm4 ;
// mov rax, - <size> ; start offset from hi
// movaps xmmword ptr[rbp + rax + hiOFFS ], xmm4 ; <--+
// movaps xmmword ptr[rbp + rax + hiOFFS + 10H], xmm4 ; |
// movaps xmmword ptr[rbp + rax + hiOFFS + 20H], xmm4 ; | Loop
// add rax, 48 ; |
// jne SHORT -5 instr ; ---+
emit->emitIns_R_R(INS_xorps, EA_ATTR(XMM_REGSIZE_BYTES), zeroSIMDReg, zeroSIMDReg);
// How many extra don't fit into the 3x unroll
int extraSimd = (blkSize % (XMM_REGSIZE_BYTES * 3)) / XMM_REGSIZE_BYTES;
if (extraSimd != 0)
{
blkSize -= XMM_REGSIZE_BYTES;
// Not a multiple of 3 so add stores at low end of block
emit->emitIns_AR_R(simdMov, EA_ATTR(XMM_REGSIZE_BYTES), zeroSIMDReg, frameReg, alignedLclLo);
if (extraSimd == 2)
{
blkSize -= XMM_REGSIZE_BYTES;
// one more store needed
emit->emitIns_AR_R(simdMov, EA_ATTR(XMM_REGSIZE_BYTES), zeroSIMDReg, frameReg,
alignedLclLo + XMM_REGSIZE_BYTES);
}
}
// Exact multiple of 3 simd lengths (or loop end condition will not be met)
noway_assert((blkSize % (3 * XMM_REGSIZE_BYTES)) == 0);
// At least 3 simd lengths remain (as loop is 3x unrolled and we want it to loop at least once)
assert(blkSize >= (3 * XMM_REGSIZE_BYTES));
// In range at start of loop
assert((alignedLclHi - blkSize) >= untrLclLo);
assert(((alignedLclHi - blkSize) + (XMM_REGSIZE_BYTES * 2)) < (untrLclHi - XMM_REGSIZE_BYTES));
// In range at end of loop
assert((alignedLclHi - (3 * XMM_REGSIZE_BYTES) + (2 * XMM_REGSIZE_BYTES)) <=
(untrLclHi - XMM_REGSIZE_BYTES));
assert((alignedLclHi - (blkSize + extraSimd * XMM_REGSIZE_BYTES)) == alignedLclLo);
// Set loop counter
emit->emitIns_R_I(INS_mov, EA_PTRSIZE, initReg, -(ssize_t)blkSize);
// Loop start
emit->emitIns_ARX_R(simdMov, EA_ATTR(XMM_REGSIZE_BYTES), zeroSIMDReg, frameReg, initReg, 1, alignedLclHi);
emit->emitIns_ARX_R(simdMov, EA_ATTR(XMM_REGSIZE_BYTES), zeroSIMDReg, frameReg, initReg, 1,
alignedLclHi + XMM_REGSIZE_BYTES);
emit->emitIns_ARX_R(simdMov, EA_ATTR(XMM_REGSIZE_BYTES), zeroSIMDReg, frameReg, initReg, 1,
alignedLclHi + 2 * XMM_REGSIZE_BYTES);
emit->emitIns_R_I(INS_add, EA_PTRSIZE, initReg, XMM_REGSIZE_BYTES * 3);
// Loop until counter is 0
emit->emitIns_J(INS_jne, nullptr, -5);
// initReg will be zero at end of the loop
*pInitRegZeroed = true;
}
if (untrLclHi != alignedLclHi)
{
assert(alignmentHiBlkSize > 0);
assert(alignmentHiBlkSize < XMM_REGSIZE_BYTES);
assert((alignedLclHi + alignmentHiBlkSize) == untrLclHi);
zeroReg = genGetZeroReg(initReg, pInitRegZeroed);
int i = 0;
for (; i + REGSIZE_BYTES <= alignmentHiBlkSize; i += REGSIZE_BYTES)
{
emit->emitIns_AR_R(ins_Store(TYP_I_IMPL), EA_PTRSIZE, zeroReg, frameReg, alignedLclHi + i);
}
#if defined(TARGET_AMD64)
assert((i == alignmentHiBlkSize) || (i + (int)sizeof(int) == alignmentHiBlkSize));
if (i != alignmentHiBlkSize)
{
emit->emitIns_AR_R(ins_Store(TYP_INT), EA_4BYTE, zeroReg, frameReg, alignedLclHi + i);
i += sizeof(int);
}
#endif // defined(TARGET_AMD64)
assert(i == alignmentHiBlkSize);
}
}
}
// Save compCalleeFPRegsPushed with the smallest register number saved at [RSP+offset], working
// down the stack to the largest register number stored at [RSP+offset-(genCountBits(regMask)-1)*XMM_REG_SIZE]
// Here offset = 16-byte aligned offset after pushing integer registers.
//
// Params
// lclFrameSize - Fixed frame size excluding callee pushed int regs.
// non-funclet: this will be compLclFrameSize.
// funclet frames: this will be FuncletInfo.fiSpDelta.
void CodeGen::genPreserveCalleeSavedFltRegs(unsigned lclFrameSize)
{
genVzeroupperIfNeeded(false);
regMaskTP regMask = compiler->compCalleeFPRegsSavedMask;
// Only callee saved floating point registers should be in regMask
assert((regMask & RBM_FLT_CALLEE_SAVED) == regMask);
// fast path return
if (regMask == RBM_NONE)
{
return;
}
#ifdef TARGET_AMD64
unsigned firstFPRegPadding = compiler->lvaIsCalleeSavedIntRegCountEven() ? REGSIZE_BYTES : 0;
unsigned offset = lclFrameSize - firstFPRegPadding - XMM_REGSIZE_BYTES;
// Offset is 16-byte aligned since we use movaps for preserving xmm regs.
assert((offset % 16) == 0);
instruction copyIns = ins_Copy(TYP_FLOAT);
#else // !TARGET_AMD64
unsigned offset = lclFrameSize - XMM_REGSIZE_BYTES;
instruction copyIns = INS_movupd;
#endif // !TARGET_AMD64
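// Illustrative only: on Windows x64 with xmm6 and xmm7 to preserve and an initial offset of 0x40,
// the loop below emits roughly
//     movaps xmmword ptr [rsp+40H], xmm6
//     movaps xmmword ptr [rsp+30H], xmm7
// (the actual offsets depend on the frame layout computed above).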
for (regNumber reg = REG_FLT_CALLEE_SAVED_FIRST; regMask != RBM_NONE; reg = REG_NEXT(reg))
{
regMaskTP regBit = genRegMask(reg);
if ((regBit & regMask) != 0)
{
// ABI requires us to preserve lower 128-bits of YMM register.
GetEmitter()->emitIns_AR_R(copyIns,
EA_8BYTE, // TODO-XArch-Cleanup: size specified here doesn't matter but should be
// EA_16BYTE
reg, REG_SPBASE, offset);
compiler->unwindSaveReg(reg, offset);
regMask &= ~regBit;
offset -= XMM_REGSIZE_BYTES;
}
}
}
// Save/Restore compCalleeFPRegsPushed with the smallest register number saved at [RSP+offset], working
// down the stack to the largest register number stored at [RSP+offset-(genCountBits(regMask)-1)*XMM_REG_SIZE]
// Here offset = 16-byte aligned offset after pushing integer registers.
//
// Params
// lclFrameSize - Fixed frame size excluding callee pushed int regs.
// non-funclet: this will be compLclFrameSize.
// funclet frames: this will be FuncletInfo.fiSpDelta.
void CodeGen::genRestoreCalleeSavedFltRegs(unsigned lclFrameSize)
{
regMaskTP regMask = compiler->compCalleeFPRegsSavedMask;
// Only callee saved floating point registers should be in regMask
assert((regMask & RBM_FLT_CALLEE_SAVED) == regMask);
// fast path return
if (regMask == RBM_NONE)
{
genVzeroupperIfNeeded();
return;
}
#ifdef TARGET_AMD64
unsigned firstFPRegPadding = compiler->lvaIsCalleeSavedIntRegCountEven() ? REGSIZE_BYTES : 0;
instruction copyIns = ins_Copy(TYP_FLOAT);
#else // !TARGET_AMD64
unsigned firstFPRegPadding = 0;
instruction copyIns = INS_movupd;
#endif // !TARGET_AMD64
unsigned offset;
regNumber regBase;
if (compiler->compLocallocUsed)
{
// localloc frame: use frame pointer relative offset
assert(isFramePointerUsed());
regBase = REG_FPBASE;
offset = lclFrameSize - genSPtoFPdelta() - firstFPRegPadding - XMM_REGSIZE_BYTES;
}
else
{
regBase = REG_SPBASE;
offset = lclFrameSize - firstFPRegPadding - XMM_REGSIZE_BYTES;
}
#ifdef TARGET_AMD64
// Offset is 16-byte aligned since we use movaps for restoring xmm regs
assert((offset % 16) == 0);
#endif // TARGET_AMD64
for (regNumber reg = REG_FLT_CALLEE_SAVED_FIRST; regMask != RBM_NONE; reg = REG_NEXT(reg))
{
regMaskTP regBit = genRegMask(reg);
if ((regBit & regMask) != 0)
{
// ABI requires us to restore lower 128-bits of YMM register.
GetEmitter()->emitIns_R_AR(copyIns,
EA_8BYTE, // TODO-XArch-Cleanup: size specified here doesn't matter but should be
// EA_16BYTE
reg, regBase, offset);
regMask &= ~regBit;
offset -= XMM_REGSIZE_BYTES;
}
}
genVzeroupperIfNeeded();
}
// Generate a vzeroupper instruction as needed to zero out the upper 128 bits of all YMM registers so that the
// AVX/legacy SSE transition penalties can be avoided. This function is used in genPreserveCalleeSavedFltRegs
// (prolog) and genRestoreCalleeSavedFltRegs (epilog). Issue VZEROUPPER in the prolog if the method contains
// 128-bit or 256-bit AVX code, to avoid the legacy SSE to AVX transition penalty, which could happen when native
// code containing legacy SSE code calls into JIT AVX code (e.g. reverse pinvoke). Issue VZEROUPPER in the epilog
// if the method contains 256-bit AVX code, to avoid the AVX to legacy SSE transition penalty.
//
// Params
//    check256bitOnly - true to emit vzeroupper only if the function contains 256-bit AVX instructions,
//                      false to emit it if the function contains any AVX instructions (either 128-bit or 256-bit).
//
void CodeGen::genVzeroupperIfNeeded(bool check256bitOnly /* = true*/)
{
bool emitVzeroUpper = false;
if (check256bitOnly)
{
emitVzeroUpper = GetEmitter()->Contains256bitAVX();
}
else
{
emitVzeroUpper = GetEmitter()->ContainsAVX();
}
if (emitVzeroUpper)
{
assert(compiler->canUseVexEncoding());
instGen(INS_vzeroupper);
}
}
//-----------------------------------------------------------------------------------
// instGen_MemoryBarrier: Emit a MemoryBarrier instruction
//
// Arguments:
// barrierKind - kind of barrier to emit (Load-only is no-op on xarch)
//
// Notes:
// All MemoryBarrier instructions can be removed by setting DOTNET_JitNoMemoryBarriers=1
//
void CodeGen::instGen_MemoryBarrier(BarrierKind barrierKind)
{
#ifdef DEBUG
if (JitConfig.JitNoMemoryBarriers() == 1)
{
return;
}
#endif // DEBUG
// only full barrier needs to be emitted on Xarch
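// The full barrier is emitted as "lock or dword ptr [rsp], 0": a locked read-modify-write of the
// stack top, which acts as a full fence on x86/x64 without needing mfence.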
if (barrierKind == BARRIER_FULL)
{
instGen(INS_lock);
GetEmitter()->emitIns_I_AR(INS_or, EA_4BYTE, 0, REG_SPBASE, 0);
}
}
#ifdef TARGET_AMD64
// Returns relocation type hint for an addr.
// Note that there are no reloc hints on x86.
//
// Arguments
// addr - data address
//
// Returns
// relocation type hint
//
unsigned short CodeGenInterface::genAddrRelocTypeHint(size_t addr)
{
return compiler->eeGetRelocTypeHint((void*)addr);
}
#endif // TARGET_AMD64
// Return true if an absolute indirect data address can be encoded as an IP-relative
// offset. Note that this method should be used only when the caller knows that
// the address is an icon value that VM has given and there is no GenTree node
// representing it. Otherwise, one should always use FitsInAddrBase().
//
// Arguments
// addr - an absolute indirect data address
//
// Returns
// true if indir data addr could be encoded as IP-relative offset.
//
bool CodeGenInterface::genDataIndirAddrCanBeEncodedAsPCRelOffset(size_t addr)
{
#ifdef TARGET_AMD64
return genAddrRelocTypeHint(addr) == IMAGE_REL_BASED_REL32;
#else
// x86: PC-relative addressing is available only for control flow instructions (jmp and call)
return false;
#endif
}
// Return true if an indirect code address can be encoded as IP-relative offset.
// Note that this method should be used only when the caller knows that the
// address is an icon value that VM has given and there is no GenTree node
// representing it. Otherwise, one should always use FitsInAddrBase().
//
// Arguments
// addr - an absolute indirect code address
//
// Returns
// true if indir code addr could be encoded as IP-relative offset.
//
bool CodeGenInterface::genCodeIndirAddrCanBeEncodedAsPCRelOffset(size_t addr)
{
#ifdef TARGET_AMD64
return genAddrRelocTypeHint(addr) == IMAGE_REL_BASED_REL32;
#else
// x86: PC-relative addressing is available only for control flow instructions (jmp and call)
return true;
#endif
}
// Return true if an indirect code address can be encoded as 32-bit displacement
// relative to zero. Note that this method should be used only when the caller
// knows that the address is an icon value that VM has given and there is no
// GenTree node representing it. Otherwise, one should always use FitsInAddrBase().
//
// Arguments
// addr - absolute indirect code address
//
// Returns
// true if absolute indir code addr could be encoded as 32-bit displacement relative to zero.
//
bool CodeGenInterface::genCodeIndirAddrCanBeEncodedAsZeroRelOffset(size_t addr)
{
return GenTreeIntConCommon::FitsInI32((ssize_t)addr);
}
// Return true if an absolute indirect code address needs a relocation recorded with VM.
//
// Arguments
// addr - an absolute indirect code address
//
// Returns
// true if indir code addr needs a relocation recorded with VM
//
bool CodeGenInterface::genCodeIndirAddrNeedsReloc(size_t addr)
{
// If generating relocatable ngen code, then all code addr should go through relocation
if (compiler->opts.compReloc)
{
return true;
}
#ifdef TARGET_AMD64
// See if the code indir addr can be encoded as 32-bit displacement relative to zero.
// We don't need a relocation in that case.
if (genCodeIndirAddrCanBeEncodedAsZeroRelOffset(addr))
{
return false;
}
// Else we need a relocation.
return true;
#else // TARGET_X86
// On x86 there is no need to record or ask for relocations during jitting,
// because all addrs fit within 32-bits.
return false;
#endif // TARGET_X86
}
// Return true if a direct code address needs to be marked as relocatable.
//
// Arguments
// addr - absolute direct code address
//
// Returns
// true if direct code addr needs a relocation recorded with VM
//
bool CodeGenInterface::genCodeAddrNeedsReloc(size_t addr)
{
// If generating relocatable ngen code, then all code addr should go through relocation
if (compiler->opts.compReloc)
{
return true;
}
#ifdef TARGET_AMD64
// By default all direct code addresses go through relocation so that VM will setup
// a jump stub if addr cannot be encoded as pc-relative offset.
return true;
#else // TARGET_X86
// On x86 there is no need for recording relocations during jitting,
// because all addrs fit within 32-bits.
return false;
#endif // TARGET_X86
}
#endif // TARGET_XARCH
// Licensed to the .NET Foundation under one or more agreements.
// The .NET Foundation licenses this file to you under the MIT license.
/*XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XX XX
XX Amd64/x86 Code Generator XX
XX XX
XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
*/
#include "jitpch.h"
#ifdef _MSC_VER
#pragma hdrstop
#pragma warning(disable : 4310) // cast truncates constant value - happens for (int8_t)0xb1
#endif
#ifdef TARGET_XARCH
#include "emit.h"
#include "codegen.h"
#include "lower.h"
#include "gcinfo.h"
#include "gcinfoencoder.h"
#include "patchpointinfo.h"
//---------------------------------------------------------------------
// genSetGSSecurityCookie: Set the "GS" security cookie in the prolog.
//
// Arguments:
// initReg - register to use as a scratch register
// pInitRegZeroed - OUT parameter. *pInitRegZeroed is set to 'false' if and only if
// this call sets 'initReg' to a non-zero value.
//
// Return Value:
// None
//
void CodeGen::genSetGSSecurityCookie(regNumber initReg, bool* pInitRegZeroed)
{
assert(compiler->compGeneratingProlog);
if (!compiler->getNeedsGSSecurityCookie())
{
return;
}
if (compiler->opts.IsOSR() && compiler->info.compPatchpointInfo->HasSecurityCookie())
{
// Security cookie is on original frame and was initialized there.
return;
}
if (compiler->gsGlobalSecurityCookieAddr == nullptr)
{
noway_assert(compiler->gsGlobalSecurityCookieVal != 0);
#ifdef TARGET_AMD64
if ((size_t)(int)compiler->gsGlobalSecurityCookieVal != compiler->gsGlobalSecurityCookieVal)
{
// initReg = #GlobalSecurityCookieVal64; [frame.GSSecurityCookie] = initReg
instGen_Set_Reg_To_Imm(EA_PTRSIZE, initReg, compiler->gsGlobalSecurityCookieVal);
GetEmitter()->emitIns_S_R(INS_mov, EA_PTRSIZE, initReg, compiler->lvaGSSecurityCookie, 0);
*pInitRegZeroed = false;
}
else
#endif
{
// mov dword ptr [frame.GSSecurityCookie], #GlobalSecurityCookieVal
GetEmitter()->emitIns_S_I(INS_mov, EA_PTRSIZE, compiler->lvaGSSecurityCookie, 0,
(int)compiler->gsGlobalSecurityCookieVal);
}
}
else
{
// Always use EAX on x86 and x64
// On x64, if we're not moving into RAX, and the address isn't RIP relative, we can't encode it.
// mov eax, dword ptr [compiler->gsGlobalSecurityCookieAddr]
// mov dword ptr [frame.GSSecurityCookie], eax
GetEmitter()->emitIns_R_AI(INS_mov, EA_PTR_DSP_RELOC, REG_EAX, (ssize_t)compiler->gsGlobalSecurityCookieAddr);
regSet.verifyRegUsed(REG_EAX);
GetEmitter()->emitIns_S_R(INS_mov, EA_PTRSIZE, REG_EAX, compiler->lvaGSSecurityCookie, 0);
if (initReg == REG_EAX)
{
*pInitRegZeroed = false;
}
}
}
/*****************************************************************************
*
 * Generate code to check that the GS cookie wasn't trashed by a buffer
* overrun. If pushReg is true, preserve all registers around code sequence.
* Otherwise ECX could be modified.
*
* Implementation Note: pushReg = true, in case of tail calls.
*/
void CodeGen::genEmitGSCookieCheck(bool pushReg)
{
noway_assert(compiler->gsGlobalSecurityCookieAddr || compiler->gsGlobalSecurityCookieVal);
// Make sure that EAX is reported as live GC-ref so that any GC that kicks in while
// executing GS cookie check will not collect the object pointed to by EAX.
//
// For Amd64 System V, a two-register-returned struct could be returned in RAX and RDX
// In such case make sure that the correct GC-ness of RDX is reported as well, so
// a GC object pointed by RDX will not be collected.
if (!pushReg)
{
// Handle multi-reg return type values
if (compiler->compMethodReturnsMultiRegRetType())
{
ReturnTypeDesc retTypeDesc;
if (varTypeIsLong(compiler->info.compRetNativeType))
{
retTypeDesc.InitializeLongReturnType();
}
else // we must have a struct return type
{
retTypeDesc.InitializeStructReturnType(compiler, compiler->info.compMethodInfo->args.retTypeClass,
compiler->info.compCallConv);
}
const unsigned regCount = retTypeDesc.GetReturnRegCount();
// Only the x86 and x64 Unix ABIs allow multi-reg returns, and the
// number of result regs should be equal to MAX_RET_REG_COUNT.
assert(regCount == MAX_RET_REG_COUNT);
for (unsigned i = 0; i < regCount; ++i)
{
gcInfo.gcMarkRegPtrVal(retTypeDesc.GetABIReturnReg(i), retTypeDesc.GetReturnRegType(i));
}
}
else if (compiler->compMethodReturnsRetBufAddr())
{
// This is for returning in an implicit RetBuf.
// If the address of the buffer is returned in REG_INTRET, mark the content of INTRET as ByRef.
// In case the return is in an implicit RetBuf, the native return type should be a struct
assert(varTypeIsStruct(compiler->info.compRetNativeType));
gcInfo.gcMarkRegPtrVal(REG_INTRET, TYP_BYREF);
}
// ... all other cases.
else
{
#ifdef TARGET_AMD64
// For x64, structs that are not returned in registers are always
// returned in implicit RetBuf. If we reached here, we should not have
// a RetBuf and the return type should not be a struct.
assert(compiler->info.compRetBuffArg == BAD_VAR_NUM);
assert(!varTypeIsStruct(compiler->info.compRetNativeType));
#endif // TARGET_AMD64
// For x86 Windows we can't make such assertions since we generate code for returning of
// the RetBuf in REG_INTRET only when the ProfilerHook is enabled. Otherwise
// compRetNativeType could be TYP_STRUCT.
gcInfo.gcMarkRegPtrVal(REG_INTRET, compiler->info.compRetNativeType);
}
}
regNumber regGSCheck;
regMaskTP regMaskGSCheck = RBM_NONE;
if (!pushReg)
{
// Non-tail call: we can use any callee trash register that is not
// a return register or contain 'this' pointer (keep alive this), since
// we are generating GS cookie check after a GT_RETURN block.
// Note: On Amd64 System V RDX is an arg register - REG_ARG_2 - as well
// as return register for two-register-returned structs.
if (compiler->lvaKeepAliveAndReportThis() && compiler->lvaGetDesc(compiler->info.compThisArg)->lvIsInReg() &&
(compiler->lvaGetDesc(compiler->info.compThisArg)->GetRegNum() == REG_ARG_0))
{
regGSCheck = REG_ARG_1;
}
else
{
regGSCheck = REG_ARG_0;
}
}
else
{
#ifdef TARGET_X86
// It doesn't matter which register we pick, since we're going to save and restore it
// around the check.
// TODO-CQ: Can we optimize the choice of register to avoid doing the push/pop sometimes?
regGSCheck = REG_EAX;
regMaskGSCheck = RBM_EAX;
#else // !TARGET_X86
// Jmp calls: specify method handle using which JIT queries VM for its entry point
// address and hence it can neither be a VSD call nor PInvoke calli with cookie
// parameter. Therefore, in case of jmp calls it is safe to use R11.
regGSCheck = REG_R11;
#endif // !TARGET_X86
}
regMaskTP byrefPushedRegs = RBM_NONE;
regMaskTP norefPushedRegs = RBM_NONE;
regMaskTP pushedRegs = RBM_NONE;
if (compiler->gsGlobalSecurityCookieAddr == nullptr)
{
#if defined(TARGET_AMD64)
// If GS cookie value fits within 32-bits we can use 'cmp mem64, imm32'.
// Otherwise, load the value into a reg and use 'cmp mem64, reg64'.
if ((int)compiler->gsGlobalSecurityCookieVal != (ssize_t)compiler->gsGlobalSecurityCookieVal)
{
instGen_Set_Reg_To_Imm(EA_PTRSIZE, regGSCheck, compiler->gsGlobalSecurityCookieVal);
GetEmitter()->emitIns_S_R(INS_cmp, EA_PTRSIZE, regGSCheck, compiler->lvaGSSecurityCookie, 0);
}
else
#endif // defined(TARGET_AMD64)
{
assert((int)compiler->gsGlobalSecurityCookieVal == (ssize_t)compiler->gsGlobalSecurityCookieVal);
GetEmitter()->emitIns_S_I(INS_cmp, EA_PTRSIZE, compiler->lvaGSSecurityCookie, 0,
(int)compiler->gsGlobalSecurityCookieVal);
}
}
else
{
// Ngen case - GS cookie value needs to be accessed through an indirection.
pushedRegs = genPushRegs(regMaskGSCheck, &byrefPushedRegs, &norefPushedRegs);
instGen_Set_Reg_To_Imm(EA_HANDLE_CNS_RELOC, regGSCheck, (ssize_t)compiler->gsGlobalSecurityCookieAddr);
GetEmitter()->emitIns_R_AR(ins_Load(TYP_I_IMPL), EA_PTRSIZE, regGSCheck, regGSCheck, 0);
GetEmitter()->emitIns_S_R(INS_cmp, EA_PTRSIZE, regGSCheck, compiler->lvaGSSecurityCookie, 0);
}
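// The check emitted below is, in outline:
//     cmp <cookie in frame>, <expected cookie value (immediate, register, or loaded via indirection)>
//     je  gsCheckBlk
//     call CORINFO_HELP_FAIL_FAST
//  gsCheckBlk: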
BasicBlock* gsCheckBlk = genCreateTempLabel();
inst_JMP(EJ_je, gsCheckBlk);
genEmitHelperCall(CORINFO_HELP_FAIL_FAST, 0, EA_UNKNOWN);
genDefineTempLabel(gsCheckBlk);
genPopRegs(pushedRegs, byrefPushedRegs, norefPushedRegs);
}
BasicBlock* CodeGen::genCallFinally(BasicBlock* block)
{
#if defined(FEATURE_EH_FUNCLETS)
// Generate a call to the finally, like this:
// mov rcx,qword ptr [rbp + 20H] // Load rcx with PSPSym
// call finally-funclet
// jmp finally-return // Only for non-retless finally calls
// The jmp can be a NOP if we're going to the next block.
// If we're generating code for the main function (not a funclet), and there is no localloc,
// then RSP at this point is the same value as that stored in the PSPSym. So just copy RSP
// instead of loading the PSPSym in this case, or if PSPSym is not used (CoreRT ABI).
if ((compiler->lvaPSPSym == BAD_VAR_NUM) ||
(!compiler->compLocallocUsed && (compiler->funCurrentFunc()->funKind == FUNC_ROOT)))
{
#ifndef UNIX_X86_ABI
inst_Mov(TYP_I_IMPL, REG_ARG_0, REG_SPBASE, /* canSkip */ false);
#endif // !UNIX_X86_ABI
}
else
{
GetEmitter()->emitIns_R_S(ins_Load(TYP_I_IMPL), EA_PTRSIZE, REG_ARG_0, compiler->lvaPSPSym, 0);
}
GetEmitter()->emitIns_J(INS_call, block->bbJumpDest);
if (block->bbFlags & BBF_RETLESS_CALL)
{
// We have a retless call, and the last instruction generated was a call.
// If the next block is in a different EH region (or is the end of the code
// block), then we need to generate a breakpoint here (since it will never
// get executed) to get proper unwind behavior.
if ((block->bbNext == nullptr) || !BasicBlock::sameEHRegion(block, block->bbNext))
{
instGen(INS_BREAKPOINT); // This should never get executed
}
}
else
{
// TODO-Linux-x86: Do we need to handle the GC information for this NOP or JMP specially, as is done for other
// architectures?
#ifndef JIT32_GCENCODER
// Because of the way the flowgraph is connected, the liveness info for this one instruction
// after the call is not (can not be) correct in cases where a variable has a last use in the
// handler. So turn off GC reporting for this single instruction.
GetEmitter()->emitDisableGC();
#endif // JIT32_GCENCODER
// Now go to where the finally funclet needs to return to.
if (block->bbNext->bbJumpDest == block->bbNext->bbNext)
{
// Fall-through.
// TODO-XArch-CQ: Can we get rid of this instruction, and just have the call return directly
// to the next instruction? This would depend on stack walking from within the finally
// handler working without this instruction being in this special EH region.
instGen(INS_nop);
}
else
{
inst_JMP(EJ_jmp, block->bbNext->bbJumpDest);
}
#ifndef JIT32_GCENCODER
GetEmitter()->emitEnableGC();
#endif // JIT32_GCENCODER
}
#else // !FEATURE_EH_FUNCLETS
// If we are about to invoke a finally locally from a try block, we have to set the ShadowSP slot
// corresponding to the finally's nesting level. When invoked in response to an exception, the
// EE does this.
//
// We have a BBJ_CALLFINALLY followed by a BBJ_ALWAYS.
//
// We will emit :
// mov [ebp - (n + 1)], 0
// mov [ebp - n ], 0xFC
// push &step
// jmp finallyBlock
// ...
// step:
// mov [ebp - n ], 0
// jmp leaveTarget
// ...
// leaveTarget:
noway_assert(isFramePointerUsed());
// Get the nesting level which contains the finally
unsigned finallyNesting = 0;
compiler->fgGetNestingLevel(block, &finallyNesting);
// The last slot is reserved for ICodeManager::FixContext(ppEndRegion)
unsigned filterEndOffsetSlotOffs;
filterEndOffsetSlotOffs = (unsigned)(compiler->lvaLclSize(compiler->lvaShadowSPslotsVar) - TARGET_POINTER_SIZE);
unsigned curNestingSlotOffs;
curNestingSlotOffs = (unsigned)(filterEndOffsetSlotOffs - ((finallyNesting + 1) * TARGET_POINTER_SIZE));
// Zero out the slot for the next nesting level
GetEmitter()->emitIns_S_I(INS_mov, EA_PTRSIZE, compiler->lvaShadowSPslotsVar,
curNestingSlotOffs - TARGET_POINTER_SIZE, 0);
GetEmitter()->emitIns_S_I(INS_mov, EA_PTRSIZE, compiler->lvaShadowSPslotsVar, curNestingSlotOffs, LCL_FINALLY_MARK);
// Now push the address where the finally funclet should return to directly.
if (!(block->bbFlags & BBF_RETLESS_CALL))
{
assert(block->isBBCallAlwaysPair());
GetEmitter()->emitIns_J(INS_push_hide, block->bbNext->bbJumpDest);
}
else
{
// EE expects a DWORD, so we provide 0
inst_IV(INS_push_hide, 0);
}
// Jump to the finally BB
inst_JMP(EJ_jmp, block->bbJumpDest);
#endif // !FEATURE_EH_FUNCLETS
// The BBJ_ALWAYS is used because the BBJ_CALLFINALLY can't point to the
// jump target using bbJumpDest - that is already used to point
// to the finally block. So just skip past the BBJ_ALWAYS unless the
// block is RETLESS.
if (!(block->bbFlags & BBF_RETLESS_CALL))
{
assert(block->isBBCallAlwaysPair());
block = block->bbNext;
}
return block;
}
#if defined(FEATURE_EH_FUNCLETS)
void CodeGen::genEHCatchRet(BasicBlock* block)
{
// Set RAX to the address the VM should return to after the catch.
// Generate a RIP-relative
// lea reg, [rip + disp32] ; the RIP is implicit
// which will be position-independent.
GetEmitter()->emitIns_R_L(INS_lea, EA_PTR_DSP_RELOC, block->bbJumpDest, REG_INTRET);
}
#else // !FEATURE_EH_FUNCLETS
void CodeGen::genEHFinallyOrFilterRet(BasicBlock* block)
{
// The last statement of the block must be a GT_RETFILT, which has already been generated.
assert(block->lastNode() != nullptr);
assert(block->lastNode()->OperGet() == GT_RETFILT);
if (block->bbJumpKind == BBJ_EHFINALLYRET)
{
assert(block->lastNode()->AsOp()->gtOp1 == nullptr); // op1 == nullptr means endfinally
// Return using a pop-jmp sequence. As the "try" block calls
// the finally with a jmp, this leaves the x86 call-ret stack
// balanced in the normal flow of path.
noway_assert(isFramePointerRequired());
inst_RV(INS_pop_hide, REG_EAX, TYP_I_IMPL);
inst_RV(INS_i_jmp, REG_EAX, TYP_I_IMPL);
}
else
{
assert(block->bbJumpKind == BBJ_EHFILTERRET);
// The return value has already been computed.
instGen_Return(0);
}
}
#endif // !FEATURE_EH_FUNCLETS
// Move an immediate value into an integer register
void CodeGen::instGen_Set_Reg_To_Imm(emitAttr size,
regNumber reg,
ssize_t imm,
insFlags flags DEBUGARG(size_t targetHandle) DEBUGARG(GenTreeFlags gtFlags))
{
// reg cannot be a FP register
assert(!genIsValidFloatReg(reg));
emitAttr origAttr = size;
if (!compiler->opts.compReloc)
{
// Strip any reloc flags from size if we aren't doing relocs
size = EA_REMOVE_FLG(size, EA_CNS_RELOC_FLG | EA_DSP_RELOC_FLG);
}
if ((imm == 0) && !EA_IS_RELOC(size))
{
instGen_Set_Reg_To_Zero(size, reg, flags);
}
else
{
// Only use lea if the original was relocatable. Otherwise we can get spurious
// instruction selection due to different memory placement at runtime.
if (EA_IS_RELOC(origAttr) && genDataIndirAddrCanBeEncodedAsPCRelOffset(imm))
{
// We will use lea so displacement and not immediate will be relocatable
size = EA_SET_FLG(EA_REMOVE_FLG(size, EA_CNS_RELOC_FLG), EA_DSP_RELOC_FLG);
GetEmitter()->emitIns_R_AI(INS_lea, size, reg, imm);
}
else
{
GetEmitter()->emitIns_R_I(INS_mov, size, reg, imm DEBUGARG(gtFlags));
}
}
regSet.verifyRegUsed(reg);
}
/***********************************************************************************
*
* Generate code to set a register 'targetReg' of type 'targetType' to the constant
* specified by the constant (GT_CNS_INT or GT_CNS_DBL) in 'tree'. This does not call
* genProduceReg() on the target register.
*/
void CodeGen::genSetRegToConst(regNumber targetReg, var_types targetType, GenTree* tree)
{
switch (tree->gtOper)
{
case GT_CNS_INT:
{
// relocatable values tend to come down as a CNS_INT of native int type
// so the line between these two opcodes is kind of blurry
GenTreeIntConCommon* con = tree->AsIntConCommon();
ssize_t cnsVal = con->IconValue();
emitAttr attr = emitActualTypeSize(targetType);
// Currently this cannot be done for all handles due to
// https://github.com/dotnet/runtime/issues/60712. However, it is
// also unclear whether we unconditionally want to use rip-relative
// lea instructions when not necessary. While a mov is larger, on
// many Intel CPUs rip-relative lea instructions have higher
// latency.
if (con->ImmedValNeedsReloc(compiler))
{
attr = EA_SET_FLG(attr, EA_CNS_RELOC_FLG);
}
if (targetType == TYP_BYREF)
{
attr = EA_SET_FLG(attr, EA_BYREF_FLG);
}
instGen_Set_Reg_To_Imm(attr, targetReg, cnsVal, INS_FLAGS_DONT_CARE DEBUGARG(0) DEBUGARG(tree->gtFlags));
regSet.verifyRegUsed(targetReg);
}
break;
case GT_CNS_DBL:
{
emitter* emit = GetEmitter();
emitAttr size = emitTypeSize(targetType);
double constValue = tree->AsDblCon()->gtDconVal;
// Make sure we use "xorps reg, reg" only for +ve zero constant (0.0) and not for -ve zero (-0.0)
if (*(__int64*)&constValue == 0)
{
// A faster/smaller way to generate 0
emit->emitIns_R_R(INS_xorps, size, targetReg, targetReg);
}
else
{
CORINFO_FIELD_HANDLE hnd = emit->emitFltOrDblConst(constValue, size);
emit->emitIns_R_C(ins_Load(targetType), size, targetReg, hnd, 0);
}
}
break;
default:
unreached();
}
}
//------------------------------------------------------------------------
// genCodeForNegNot: Produce code for a GT_NEG/GT_NOT node.
//
// Arguments:
// tree - the node
//
void CodeGen::genCodeForNegNot(GenTree* tree)
{
assert(tree->OperIs(GT_NEG, GT_NOT));
regNumber targetReg = tree->GetRegNum();
var_types targetType = tree->TypeGet();
if (varTypeIsFloating(targetType))
{
assert(tree->gtOper == GT_NEG);
genSSE2BitwiseOp(tree);
}
else
{
GenTree* operand = tree->gtGetOp1();
assert(operand->isUsedFromReg());
regNumber operandReg = genConsumeReg(operand);
inst_Mov(targetType, targetReg, operandReg, /* canSkip */ true);
instruction ins = genGetInsForOper(tree->OperGet(), targetType);
inst_RV(ins, targetReg, targetType);
}
genProduceReg(tree);
}
//------------------------------------------------------------------------
// genCodeForBswap: Produce code for a GT_BSWAP / GT_BSWAP16 node.
//
// Arguments:
// tree - the node
//
void CodeGen::genCodeForBswap(GenTree* tree)
{
// TODO: If we're swapping immediately after a read from memory or immediately before
// a write to memory, use the MOVBE instruction instead of the BSWAP instruction if
// the platform supports it.
assert(tree->OperIs(GT_BSWAP, GT_BSWAP16));
regNumber targetReg = tree->GetRegNum();
var_types targetType = tree->TypeGet();
GenTree* operand = tree->gtGetOp1();
assert(operand->isUsedFromReg());
regNumber operandReg = genConsumeReg(operand);
inst_Mov(targetType, targetReg, operandReg, /* canSkip */ true);
if (tree->OperIs(GT_BSWAP))
{
// 32-bit and 64-bit byte swaps use "bswap reg"
inst_RV(INS_bswap, targetReg, targetType);
}
else
{
// 16-bit byte swaps use "ror reg.16, 8"
inst_RV_IV(INS_ror_N, targetReg, 8 /* val */, emitAttr::EA_2BYTE);
}
genProduceReg(tree);
}
// Produce code for a GT_INC_SATURATE node.
void CodeGen::genCodeForIncSaturate(GenTree* tree)
{
regNumber targetReg = tree->GetRegNum();
var_types targetType = tree->TypeGet();
GenTree* operand = tree->gtGetOp1();
assert(operand->isUsedFromReg());
regNumber operandReg = genConsumeReg(operand);
inst_Mov(targetType, targetReg, operandReg, /* canSkip */ true);
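// "add reg, 1" followed by "sbb reg, 0" subtracts the carry back: if the value was all-ones the add
// wraps to zero and sets CF, and the sbb restores all-ones, so the increment saturates at the maximum.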
inst_RV_IV(INS_add, targetReg, 1, emitActualTypeSize(targetType));
inst_RV_IV(INS_sbb, targetReg, 0, emitActualTypeSize(targetType));
genProduceReg(tree);
}
// Generate code to get the high N bits of a N*N=2N bit multiplication result
void CodeGen::genCodeForMulHi(GenTreeOp* treeNode)
{
assert(!treeNode->gtOverflowEx());
regNumber targetReg = treeNode->GetRegNum();
var_types targetType = treeNode->TypeGet();
emitter* emit = GetEmitter();
emitAttr size = emitTypeSize(treeNode);
GenTree* op1 = treeNode->AsOp()->gtOp1;
GenTree* op2 = treeNode->AsOp()->gtOp2;
// to get the high bits of the multiply, we are constrained to using the
// 1-op form: RDX:RAX = RAX * rm
// The 3-op form (Rx=Ry*Rz) does not support it.
genConsumeOperands(treeNode->AsOp());
GenTree* regOp = op1;
GenTree* rmOp = op2;
// Set rmOp to the memory operand (if any)
if (op1->isUsedFromMemory() || (op2->isUsedFromReg() && (op2->GetRegNum() == REG_RAX)))
{
regOp = op2;
rmOp = op1;
}
assert(regOp->isUsedFromReg());
// Setup targetReg when neither of the source operands was a matching register
inst_Mov(targetType, REG_RAX, regOp->GetRegNum(), /* canSkip */ true);
instruction ins;
if ((treeNode->gtFlags & GTF_UNSIGNED) == 0)
{
ins = INS_imulEAX;
}
else
{
ins = INS_mulEAX;
}
emit->emitInsBinary(ins, size, treeNode, rmOp);
// Move the result to the desired register, if necessary
if (treeNode->OperGet() == GT_MULHI)
{
inst_Mov(targetType, targetReg, REG_RDX, /* canSkip */ true);
}
genProduceReg(treeNode);
}
#ifdef TARGET_X86
//------------------------------------------------------------------------
// genCodeForLongUMod: Generate code for a tree of the form
// `(umod (gt_long x y) (const int))`
//
// Arguments:
// node - the node for which to generate code
//
void CodeGen::genCodeForLongUMod(GenTreeOp* node)
{
assert(node != nullptr);
assert(node->OperGet() == GT_UMOD);
assert(node->TypeGet() == TYP_INT);
GenTreeOp* const dividend = node->gtOp1->AsOp();
assert(dividend->OperGet() == GT_LONG);
assert(varTypeIsLong(dividend));
genConsumeOperands(node);
GenTree* const dividendLo = dividend->gtOp1;
GenTree* const dividendHi = dividend->gtOp2;
assert(dividendLo->isUsedFromReg());
assert(dividendHi->isUsedFromReg());
GenTree* const divisor = node->gtOp2;
assert(divisor->gtSkipReloadOrCopy()->OperGet() == GT_CNS_INT);
assert(divisor->gtSkipReloadOrCopy()->isUsedFromReg());
assert(divisor->gtSkipReloadOrCopy()->AsIntCon()->gtIconVal >= 2);
assert(divisor->gtSkipReloadOrCopy()->AsIntCon()->gtIconVal <= 0x3fffffff);
// dividendLo must be in RAX; dividendHi must be in RDX
genCopyRegIfNeeded(dividendLo, REG_EAX);
genCopyRegIfNeeded(dividendHi, REG_EDX);
// At this point, EAX:EDX contains the 64bit dividend and op2->GetRegNum()
// contains the 32bit divisor. We want to generate the following code:
//
// cmp edx, divisor->GetRegNum()
// jb noOverflow
//
// mov temp, eax
// mov eax, edx
// xor edx, edx
// div divisor->GetRegNum()
// mov eax, temp
//
// noOverflow:
// div divisor->GetRegNum()
//
// This works because (a * 2^32 + b) % c = ((a % c) * 2^32 + b) % c.
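// In both paths the high half (edx) is less than the divisor before the final div, so the quotient
// fits in 32 bits and the div cannot raise a #DE overflow fault.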
BasicBlock* const noOverflow = genCreateTempLabel();
// cmp edx, divisor->GetRegNum()
// jb noOverflow
inst_RV_RV(INS_cmp, REG_EDX, divisor->GetRegNum());
inst_JMP(EJ_jb, noOverflow);
// mov temp, eax
// mov eax, edx
// xor edx, edx
// div divisor->GetRegNum()
// mov eax, temp
const regNumber tempReg = node->GetSingleTempReg();
inst_Mov(TYP_INT, tempReg, REG_EAX, /* canSkip */ false);
inst_Mov(TYP_INT, REG_EAX, REG_EDX, /* canSkip */ false);
instGen_Set_Reg_To_Zero(EA_PTRSIZE, REG_EDX);
inst_RV(INS_div, divisor->GetRegNum(), TYP_INT);
inst_Mov(TYP_INT, REG_EAX, tempReg, /* canSkip */ false);
// noOverflow:
// div divisor->GetRegNum()
genDefineTempLabel(noOverflow);
inst_RV(INS_div, divisor->GetRegNum(), TYP_INT);
const regNumber targetReg = node->GetRegNum();
inst_Mov(TYP_INT, targetReg, REG_RDX, /* canSkip */ true);
genProduceReg(node);
}
#endif // TARGET_X86
//------------------------------------------------------------------------
// genCodeForDivMod: Generate code for a DIV or MOD operation.
//
// Arguments:
// treeNode - the node to generate the code for
//
void CodeGen::genCodeForDivMod(GenTreeOp* treeNode)
{
assert(treeNode->OperIs(GT_DIV, GT_UDIV, GT_MOD, GT_UMOD));
GenTree* dividend = treeNode->gtOp1;
#ifdef TARGET_X86
if (varTypeIsLong(dividend->TypeGet()))
{
genCodeForLongUMod(treeNode);
return;
}
#endif // TARGET_X86
GenTree* divisor = treeNode->gtOp2;
genTreeOps oper = treeNode->OperGet();
emitAttr size = emitTypeSize(treeNode);
regNumber targetReg = treeNode->GetRegNum();
var_types targetType = treeNode->TypeGet();
emitter* emit = GetEmitter();
// Node's type must be int/native int, small integer types are not
// supported and floating point types are handled by genCodeForBinary.
assert(varTypeIsIntOrI(targetType));
// dividend is in a register.
assert(dividend->isUsedFromReg());
genConsumeOperands(treeNode->AsOp());
// dividend must be in RAX
genCopyRegIfNeeded(dividend, REG_RAX);
// zero or sign extend rax to rdx
if (oper == GT_UMOD || oper == GT_UDIV ||
(dividend->IsIntegralConst() && (dividend->AsIntConCommon()->IconValue() > 0)))
{
instGen_Set_Reg_To_Zero(EA_PTRSIZE, REG_EDX);
}
else
{
emit->emitIns(INS_cdq, size);
// the cdq instruction writes RDX, so clear the gcInfo for RDX
gcInfo.gcMarkRegSetNpt(RBM_RDX);
}
// Perform the 'targetType' (64-bit or 32-bit) divide instruction
instruction ins;
if (oper == GT_UMOD || oper == GT_UDIV)
{
ins = INS_div;
}
else
{
ins = INS_idiv;
}
emit->emitInsBinary(ins, size, treeNode, divisor);
// DIV/IDIV instructions always store the quotient in RAX and the remainder in RDX.
// Move the result to the desired register, if necessary
if (oper == GT_DIV || oper == GT_UDIV)
{
inst_Mov(targetType, targetReg, REG_RAX, /* canSkip */ true);
}
else
{
assert((oper == GT_MOD) || (oper == GT_UMOD));
inst_Mov(targetType, targetReg, REG_RDX, /* canSkip */ true);
}
genProduceReg(treeNode);
}
//------------------------------------------------------------------------
// genCodeForBinary: Generate code for many binary arithmetic operators
//
// Arguments:
// treeNode - The binary operation for which we are generating code.
//
// Return Value:
// None.
//
// Notes:
// Integer MUL and DIV variants have special constraints on x64 so are not handled here.
// See the assert below for the operators that are handled.
void CodeGen::genCodeForBinary(GenTreeOp* treeNode)
{
#ifdef DEBUG
bool isValidOper = treeNode->OperIs(GT_ADD, GT_SUB);
if (varTypeIsFloating(treeNode->TypeGet()))
{
isValidOper |= treeNode->OperIs(GT_MUL, GT_DIV);
}
else
{
isValidOper |= treeNode->OperIs(GT_AND, GT_OR, GT_XOR);
#ifndef TARGET_64BIT
isValidOper |= treeNode->OperIs(GT_ADD_LO, GT_ADD_HI, GT_SUB_LO, GT_SUB_HI);
#endif
}
assert(isValidOper);
#endif
genConsumeOperands(treeNode);
const genTreeOps oper = treeNode->OperGet();
regNumber targetReg = treeNode->GetRegNum();
var_types targetType = treeNode->TypeGet();
emitter* emit = GetEmitter();
GenTree* op1 = treeNode->gtGetOp1();
GenTree* op2 = treeNode->gtGetOp2();
// Commutative operations can mark op1 as contained or reg-optional to generate "op reg, memop/immed"
if (!op1->isUsedFromReg())
{
assert(treeNode->OperIsCommutative());
assert(op1->isMemoryOp() || op1->IsLocal() || op1->IsCnsNonZeroFltOrDbl() || op1->IsIntCnsFitsInI32() ||
op1->IsRegOptional());
op1 = treeNode->gtGetOp2();
op2 = treeNode->gtGetOp1();
}
instruction ins = genGetInsForOper(treeNode->OperGet(), targetType);
// The arithmetic node must be sitting in a register (since it's not contained)
noway_assert(targetReg != REG_NA);
regNumber op1reg = op1->isUsedFromReg() ? op1->GetRegNum() : REG_NA;
regNumber op2reg = op2->isUsedFromReg() ? op2->GetRegNum() : REG_NA;
if (varTypeIsFloating(treeNode->TypeGet()))
{
// floating-point addition, subtraction, multiplication, and division
// all have RMW semantics if VEX support is not available
bool isRMW = !compiler->canUseVexEncoding();
inst_RV_RV_TT(ins, emitTypeSize(treeNode), targetReg, op1reg, op2, isRMW);
genProduceReg(treeNode);
return;
}
GenTree* dst;
GenTree* src;
// This is the case of reg1 = reg1 op reg2
// We're ready to emit the instruction without any moves
if (op1reg == targetReg)
{
dst = op1;
src = op2;
}
// We have reg1 = reg2 op reg1
// In order for this operation to be correct
// we need that op is a commutative operation so
// we can convert it into reg1 = reg1 op reg2 and emit
// the same code as above
else if (op2reg == targetReg)
{
noway_assert(GenTree::OperIsCommutative(oper));
dst = op2;
src = op1;
}
// now we know there are 3 different operands so attempt to use LEA
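// e.g. reg3 = reg1 + reg2 becomes "lea reg3, [reg1 + reg2]", and reg3 = reg1 + imm becomes
// "lea reg3, [reg1 + imm]" (illustrative forms; the emitter chooses the exact encoding).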
else if (oper == GT_ADD && !varTypeIsFloating(treeNode) && !treeNode->gtOverflowEx() // LEA does not set flags
&& (op2->isContainedIntOrIImmed() || op2->isUsedFromReg()) && !treeNode->gtSetFlags())
{
if (op2->isContainedIntOrIImmed())
{
emit->emitIns_R_AR(INS_lea, emitTypeSize(treeNode), targetReg, op1reg,
(int)op2->AsIntConCommon()->IconValue());
}
else
{
assert(op2reg != REG_NA);
emit->emitIns_R_ARX(INS_lea, emitTypeSize(treeNode), targetReg, op1reg, op2reg, 1, 0);
}
genProduceReg(treeNode);
return;
}
// dest, op1 and op2 registers are different:
// reg3 = reg1 op reg2
// We can implement this by issuing a mov:
// reg3 = reg1
// reg3 = reg3 op reg2
else
{
var_types op1Type = op1->TypeGet();
inst_Mov(op1Type, targetReg, op1reg, /* canSkip */ false);
regSet.verifyRegUsed(targetReg);
gcInfo.gcMarkRegPtrVal(targetReg, op1Type);
dst = treeNode;
src = op2;
}
// try to use an inc or dec
if (oper == GT_ADD && !varTypeIsFloating(treeNode) && src->isContainedIntOrIImmed() && !treeNode->gtOverflowEx())
{
if (src->IsIntegralConst(1))
{
emit->emitIns_R(INS_inc, emitTypeSize(treeNode), targetReg);
genProduceReg(treeNode);
return;
}
else if (src->IsIntegralConst(-1))
{
emit->emitIns_R(INS_dec, emitTypeSize(treeNode), targetReg);
genProduceReg(treeNode);
return;
}
}
regNumber r = emit->emitInsBinary(ins, emitTypeSize(treeNode), dst, src);
noway_assert(r == targetReg);
if (treeNode->gtOverflowEx())
{
#if !defined(TARGET_64BIT)
assert(oper == GT_ADD || oper == GT_SUB || oper == GT_ADD_HI || oper == GT_SUB_HI);
#else
assert(oper == GT_ADD || oper == GT_SUB);
#endif
genCheckOverflow(treeNode);
}
genProduceReg(treeNode);
}
//------------------------------------------------------------------------
// genCodeForMul: Generate code for a MUL operation.
//
// Arguments:
// treeNode - the node to generate the code for
//
void CodeGen::genCodeForMul(GenTreeOp* treeNode)
{
assert(treeNode->OperIs(GT_MUL));
regNumber targetReg = treeNode->GetRegNum();
var_types targetType = treeNode->TypeGet();
emitter* emit = GetEmitter();
// Node's type must be int or long (only on x64), small integer types are not
// supported and floating point types are handled by genCodeForBinary.
assert(varTypeIsIntOrI(targetType));
instruction ins;
emitAttr size = emitTypeSize(treeNode);
bool isUnsignedMultiply = ((treeNode->gtFlags & GTF_UNSIGNED) != 0);
bool requiresOverflowCheck = treeNode->gtOverflowEx();
GenTree* op1 = treeNode->gtGetOp1();
GenTree* op2 = treeNode->gtGetOp2();
// there are 3 forms of x64 multiply:
// 1-op form with 128 result: RDX:RAX = RAX * rm
// 2-op form: reg *= rm
// 3-op form: reg = rm * imm
genConsumeOperands(treeNode);
// This matches the 'mul' lowering in Lowering::SetMulOpCounts()
//
// immOp :: Only one operand can be an immediate
// rmOp :: Only one operand can be a memory op.
// regOp :: A register op (especially the operand that matches 'targetReg')
// (can be nullptr when we have both a memory op and an immediate op)
GenTree* immOp = nullptr;
GenTree* rmOp = op1;
GenTree* regOp;
if (op2->isContainedIntOrIImmed())
{
immOp = op2;
}
else if (op1->isContainedIntOrIImmed())
{
immOp = op1;
rmOp = op2;
}
if (immOp != nullptr)
{
// CQ: When possible use LEA for mul by imm 3, 5 or 9
ssize_t imm = immOp->AsIntConCommon()->IconValue();
if (!requiresOverflowCheck && rmOp->isUsedFromReg() && ((imm == 3) || (imm == 5) || (imm == 9)))
{
// We will use the LEA instruction to perform this multiply
// Note that an LEA with base=x, index=x and scale=(imm-1) computes x*imm when imm=3,5 or 9.
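// For example, for imm == 5 this emits (roughly):
//   lea targetReg, [rmOpReg + rmOpReg*4]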
unsigned int scale = (unsigned int)(imm - 1);
GetEmitter()->emitIns_R_ARX(INS_lea, size, targetReg, rmOp->GetRegNum(), rmOp->GetRegNum(), scale, 0);
}
else if (!requiresOverflowCheck && rmOp->isUsedFromReg() && (imm == genFindLowestBit(imm)) && (imm != 0))
{
// Use shift for constant multiply when legal
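// For example, for imm == 8 this emits (roughly):
//   mov targetReg, rmOpReg   ; skipped when they are already the same register
//   shl targetReg, 3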
uint64_t zextImm = static_cast<uint64_t>(static_cast<size_t>(imm));
unsigned int shiftAmount = genLog2(zextImm);
// Copy reg src to dest register
inst_Mov(targetType, targetReg, rmOp->GetRegNum(), /* canSkip */ true);
inst_RV_SH(INS_shl, size, targetReg, shiftAmount);
}
else
{
// use the 3-op form with immediate
ins = GetEmitter()->inst3opImulForReg(targetReg);
emit->emitInsBinary(ins, size, rmOp, immOp);
}
}
else // we have no contained immediate operand
{
regOp = op1;
rmOp = op2;
regNumber mulTargetReg = targetReg;
if (isUnsignedMultiply && requiresOverflowCheck)
{
ins = INS_mulEAX;
mulTargetReg = REG_RAX;
}
else
{
ins = INS_imul;
}
// Set rmOp to the memory operand (if any)
// or set regOp to the op2 when it has the matching target register for our multiply op
//
if (op1->isUsedFromMemory() || (op2->isUsedFromReg() && (op2->GetRegNum() == mulTargetReg)))
{
regOp = op2;
rmOp = op1;
}
assert(regOp->isUsedFromReg());
// Setup targetReg when neither of the source operands was a matching register
inst_Mov(targetType, mulTargetReg, regOp->GetRegNum(), /* canSkip */ true);
emit->emitInsBinary(ins, size, treeNode, rmOp);
// Move the result to the desired register, if necessary
if (ins == INS_mulEAX)
{
inst_Mov(targetType, targetReg, REG_RAX, /* canSkip */ true);
}
}
if (requiresOverflowCheck)
{
// Overflow checking is only used for non-floating point types
noway_assert(!varTypeIsFloating(treeNode));
genCheckOverflow(treeNode);
}
genProduceReg(treeNode);
}
#ifdef FEATURE_SIMD
//------------------------------------------------------------------------
// genSIMDSplitReturn: Generates code for returning a fixed-size SIMD type that lives
// in a single register, but is returned in multiple registers.
//
// Arguments:
// src - The source of the return
// retTypeDesc - The return type descriptor.
//
void CodeGen::genSIMDSplitReturn(GenTree* src, ReturnTypeDesc* retTypeDesc)
{
assert(varTypeIsSIMD(src));
assert(src->isUsedFromReg());
// This is the case where the operand is in a single register but needs to be
// returned in multiple ABI return registers.
regNumber opReg = src->GetRegNum();
regNumber reg0 = retTypeDesc->GetABIReturnReg(0);
regNumber reg1 = retTypeDesc->GetABIReturnReg(1);
assert((reg0 != REG_NA) && (reg1 != REG_NA) && (opReg != REG_NA));
const bool srcIsFloatReg = genIsValidFloatReg(opReg);
const bool dstIsFloatReg = genIsValidFloatReg(reg0);
assert(srcIsFloatReg);
#ifdef TARGET_AMD64
assert(src->TypeIs(TYP_SIMD16));
assert(srcIsFloatReg == dstIsFloatReg);
if (opReg != reg0 && opReg != reg1)
{
// Operand reg is different from return regs.
// Copy opReg to reg0 and let it to be handled by one of the
// two cases below.
inst_Mov(TYP_SIMD16, reg0, opReg, /* canSkip */ false);
opReg = reg0;
}
if (opReg == reg0)
{
assert(opReg != reg1);
// reg1 = opReg.
inst_Mov(TYP_SIMD16, reg1, opReg, /* canSkip */ false);
}
else
{
assert(opReg == reg1);
// reg0 = opReg.
inst_Mov(TYP_SIMD16, reg0, opReg, /* canSkip */ false);
}
// reg0 already has the required 8 bytes in bit positions [63:0].
// Swap the upper and lower 8 bytes of reg1 so that its desired 8 bytes land in bit positions [63:0].
inst_RV_RV_IV(INS_shufpd, EA_16BYTE, reg1, reg1, 0x01);
#else // TARGET_X86
assert(src->TypeIs(TYP_SIMD8));
assert(srcIsFloatReg != dstIsFloatReg);
assert((reg0 == REG_EAX) && (reg1 == REG_EDX));
// reg0 = opReg[31:0]
inst_Mov(TYP_INT, reg0, opReg, /* canSkip */ false);
// reg1 = opReg[63:32]
if (compiler->compOpportunisticallyDependsOn(InstructionSet_SSE41))
{
inst_RV_TT_IV(INS_pextrd, EA_4BYTE, reg1, src, 1);
}
else
{
int8_t shuffleMask = 1; // we only need [63:32]->[31:0], the rest is not read.
inst_RV_TT_IV(INS_pshufd, EA_8BYTE, opReg, src, shuffleMask);
inst_Mov(TYP_INT, reg1, opReg, /* canSkip */ false);
}
#endif // TARGET_X86
}
#endif // FEATURE_SIMD
#if defined(TARGET_X86)
//------------------------------------------------------------------------
// genFloatReturn: Generates code for float return statement for x86.
//
// Note: treeNode's and op1's registers are already consumed.
//
// Arguments:
// treeNode - The GT_RETURN or GT_RETFILT tree node with float type.
//
// Return Value:
// None
//
void CodeGen::genFloatReturn(GenTree* treeNode)
{
assert(treeNode->OperGet() == GT_RETURN || treeNode->OperGet() == GT_RETFILT);
assert(varTypeIsFloating(treeNode));
GenTree* op1 = treeNode->gtGetOp1();
// Spill the return value register from an XMM register to the stack, then load it on the x87 stack.
// If it already has a home location, use that. Otherwise, we need a temp.
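// (The x86 calling convention returns floating-point values in ST(0); there is no direct
// XMM-to-x87 move, hence the round trip through memory: store from the XMM register, then fld.)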
if (genIsRegCandidateLocal(op1) && compiler->lvaGetDesc(op1->AsLclVarCommon())->lvOnFrame)
{
if (compiler->lvaGetDesc(op1->AsLclVarCommon())->GetRegNum() != REG_STK)
{
op1->gtFlags |= GTF_SPILL;
inst_TT_RV(ins_Store(op1->gtType, compiler->isSIMDTypeLocalAligned(op1->AsLclVarCommon()->GetLclNum())),
emitTypeSize(op1->TypeGet()), op1, op1->GetRegNum());
}
// Now, load it to the fp stack.
GetEmitter()->emitIns_S(INS_fld, emitTypeSize(op1), op1->AsLclVarCommon()->GetLclNum(), 0);
}
else
{
// Spill the value, which should be in a register, then load it to the fp stack.
// TODO-X86-CQ: Deal with things that are already in memory (don't call genConsumeReg yet).
op1->gtFlags |= GTF_SPILL;
regSet.rsSpillTree(op1->GetRegNum(), op1);
op1->gtFlags |= GTF_SPILLED;
op1->gtFlags &= ~GTF_SPILL;
TempDsc* t = regSet.rsUnspillInPlace(op1, op1->GetRegNum());
inst_FS_ST(INS_fld, emitActualTypeSize(op1->gtType), t, 0);
op1->gtFlags &= ~GTF_SPILLED;
regSet.tmpRlsTemp(t);
}
}
#endif // TARGET_X86
//------------------------------------------------------------------------
// genCodeForCompare: Produce code for a GT_EQ/GT_NE/GT_LT/GT_LE/GT_GE/GT_GT/GT_TEST_EQ/GT_TEST_NE/GT_CMP node.
//
// Arguments:
// tree - the node
//
void CodeGen::genCodeForCompare(GenTreeOp* tree)
{
assert(tree->OperIs(GT_EQ, GT_NE, GT_LT, GT_LE, GT_GE, GT_GT, GT_TEST_EQ, GT_TEST_NE, GT_CMP));
// TODO-XArch-CQ: Check if we can use the currently set flags.
// TODO-XArch-CQ: Check for the case where we can simply transfer the carry bit to a register
// (signed < or >= where targetReg != REG_NA)
GenTree* op1 = tree->gtOp1;
var_types op1Type = op1->TypeGet();
if (varTypeIsFloating(op1Type))
{
genCompareFloat(tree);
}
else
{
genCompareInt(tree);
}
}
//------------------------------------------------------------------------
// genCodeForBT: Generates code for a GT_BT node.
//
// Arguments:
// tree - The node.
//
void CodeGen::genCodeForBT(GenTreeOp* bt)
{
assert(bt->OperIs(GT_BT));
GenTree* op1 = bt->gtGetOp1();
GenTree* op2 = bt->gtGetOp2();
var_types type = genActualType(op1->TypeGet());
assert(op1->isUsedFromReg() && op2->isUsedFromReg());
assert((genTypeSize(type) >= genTypeSize(TYP_INT)) && (genTypeSize(type) <= genTypeSize(TYP_I_IMPL)));
genConsumeOperands(bt);
// Note that the emitter doesn't fully support INS_bt, it only supports the reg,reg
// form and encodes the registers in reverse order. To get the correct order we need
// to reverse the operands when calling emitIns_R_R.
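// The net effect is "bt op1, op2": CF is set to bit (op2 mod operand-size) of op1,
// and the consuming JCC/SETCC then tests the C/NC conditions.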
GetEmitter()->emitIns_R_R(INS_bt, emitTypeSize(type), op2->GetRegNum(), op1->GetRegNum());
}
// clang-format off
const CodeGen::GenConditionDesc CodeGen::GenConditionDesc::map[32]
{
{ }, // NONE
{ }, // 1
{ EJ_jl }, // SLT
{ EJ_jle }, // SLE
{ EJ_jge }, // SGE
{ EJ_jg }, // SGT
{ EJ_js }, // S
{ EJ_jns }, // NS
{ EJ_je }, // EQ
{ EJ_jne }, // NE
{ EJ_jb }, // ULT
{ EJ_jbe }, // ULE
{ EJ_jae }, // UGE
{ EJ_ja }, // UGT
{ EJ_jb }, // C
{ EJ_jae }, // NC
// Floating point compare instructions (UCOMISS, UCOMISD etc.) set the condition flags as follows:
// ZF PF CF Meaning
// ---------------------
// 1 1 1 Unordered
// 0 0 0 Greater
// 0 0 1 Less Than
// 1 0 0 Equal
//
// Since ZF and CF are also set when the result is unordered, in some cases we first need to check
// PF before checking ZF/CF. In general, ordered conditions will result in a jump only if PF is not
// set and unordered conditions will result in a jump only if PF is set.
{ EJ_jnp, GT_AND, EJ_je }, // FEQ
{ EJ_jne }, // FNE
{ EJ_jnp, GT_AND, EJ_jb }, // FLT
{ EJ_jnp, GT_AND, EJ_jbe }, // FLE
{ EJ_jae }, // FGE
{ EJ_ja }, // FGT
{ EJ_jo }, // O
{ EJ_jno }, // NO
{ EJ_je }, // FEQU
{ EJ_jp, GT_OR, EJ_jne }, // FNEU
{ EJ_jb }, // FLTU
{ EJ_jbe }, // FLEU
{ EJ_jp, GT_OR, EJ_jae }, // FGEU
{ EJ_jp, GT_OR, EJ_ja }, // FGTU
{ EJ_jp }, // P
{ EJ_jnp }, // NP
};
// clang-format on
//------------------------------------------------------------------------
// inst_SETCC: Generate code to set a register to 0 or 1 based on a condition.
//
// Arguments:
// condition - The condition
// type - The type of the value to be produced
// dstReg - The destination register to be set to 1 or 0
//
void CodeGen::inst_SETCC(GenCondition condition, var_types type, regNumber dstReg)
{
assert(varTypeIsIntegral(type));
assert(genIsValidIntReg(dstReg) && isByteReg(dstReg));
const GenConditionDesc& desc = GenConditionDesc::Get(condition);
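// Most conditions map to a single SETcc. Entries with a second condition (the ordered/unordered
// floating-point cases) are materialized with two SETcc's and a branch; e.g. FEQ produces roughly
// (dstReg shown as al for illustration):
//   setnp al
//   jp    <skip>     ; unordered: keep the 0 produced by setnp
//   sete  al
// <skip>: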
inst_SET(desc.jumpKind1, dstReg);
if (desc.oper != GT_NONE)
{
BasicBlock* labelNext = genCreateTempLabel();
inst_JMP((desc.oper == GT_OR) ? desc.jumpKind1 : emitter::emitReverseJumpKind(desc.jumpKind1), labelNext);
inst_SET(desc.jumpKind2, dstReg);
genDefineTempLabel(labelNext);
}
if (!varTypeIsByte(type))
{
GetEmitter()->emitIns_Mov(INS_movzx, EA_1BYTE, dstReg, dstReg, /* canSkip */ false);
}
}
//------------------------------------------------------------------------
// genCodeForReturnTrap: Produce code for a GT_RETURNTRAP node.
//
// Arguments:
// tree - the GT_RETURNTRAP node
//
void CodeGen::genCodeForReturnTrap(GenTreeOp* tree)
{
assert(tree->OperGet() == GT_RETURNTRAP);
// this is nothing but a conditional call to CORINFO_HELP_STOP_FOR_GC
// based on the contents of 'data'
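// The generated pattern is roughly:
//   cmp  <data>, 0
//   je   skip
//   call CORINFO_HELP_STOP_FOR_GC
// skip: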
GenTree* data = tree->gtOp1;
genConsumeRegs(data);
GenTreeIntCon cns = intForm(TYP_INT, 0);
cns.SetContained();
GetEmitter()->emitInsBinary(INS_cmp, emitTypeSize(TYP_INT), data, &cns);
BasicBlock* skipLabel = genCreateTempLabel();
inst_JMP(EJ_je, skipLabel);
// emit the call to the EE-helper that stops for GC (or other reasons)
regNumber tmpReg = tree->GetSingleTempReg(RBM_ALLINT);
assert(genIsValidIntReg(tmpReg));
genEmitHelperCall(CORINFO_HELP_STOP_FOR_GC, 0, EA_UNKNOWN, tmpReg);
genDefineTempLabel(skipLabel);
}
/*****************************************************************************
*
* Generate code for a single node in the tree.
* Preconditions: All operands have been evaluated
*
*/
void CodeGen::genCodeForTreeNode(GenTree* treeNode)
{
regNumber targetReg;
#if !defined(TARGET_64BIT)
if (treeNode->TypeGet() == TYP_LONG)
{
// All long enregistered nodes will have been decomposed into their
// constituent lo and hi nodes.
targetReg = REG_NA;
}
else
#endif // !defined(TARGET_64BIT)
{
targetReg = treeNode->GetRegNum();
}
var_types targetType = treeNode->TypeGet();
emitter* emit = GetEmitter();
#ifdef DEBUG
// Validate that all the operands for the current node are consumed in order.
// This is important because LSRA ensures that any necessary copies will be
// handled correctly.
lastConsumedNode = nullptr;
if (compiler->verbose)
{
unsigned seqNum = treeNode->gtSeqNum; // Useful for setting a conditional break in Visual Studio
compiler->gtDispLIRNode(treeNode, "Generating: ");
}
#endif // DEBUG
// Is this a node whose value is already in a register? LSRA denotes this by
// setting the GTF_REUSE_REG_VAL flag.
if (treeNode->IsReuseRegVal())
{
// For now, this is only used for constant nodes.
assert((treeNode->OperIsConst()));
JITDUMP(" TreeNode is marked ReuseReg\n");
return;
}
// contained nodes are part of their parents for codegen purposes
// ex : immediates, most LEAs
if (treeNode->isContained())
{
return;
}
switch (treeNode->gtOper)
{
#ifndef JIT32_GCENCODER
case GT_START_NONGC:
GetEmitter()->emitDisableGC();
break;
#endif // !defined(JIT32_GCENCODER)
case GT_START_PREEMPTGC:
// Kill callee saves GC registers, and create a label
// so that information gets propagated to the emitter.
gcInfo.gcMarkRegSetNpt(RBM_INT_CALLEE_SAVED);
genDefineTempLabel(genCreateTempLabel());
break;
case GT_PROF_HOOK:
#ifdef PROFILING_SUPPORTED
// We should be seeing this only if profiler hook is needed
noway_assert(compiler->compIsProfilerHookNeeded());
// Right now this node is used only for tail calls. In future if
// we intend to use it for Enter or Leave hooks, add a data member
// to this node indicating the kind of profiler hook. For example,
// helper number can be used.
genProfilingLeaveCallback(CORINFO_HELP_PROF_FCN_TAILCALL);
#endif // PROFILING_SUPPORTED
break;
case GT_LCLHEAP:
genLclHeap(treeNode);
break;
case GT_CNS_INT:
#ifdef TARGET_X86
assert(!treeNode->IsIconHandle(GTF_ICON_TLS_HDL));
#endif // TARGET_X86
FALLTHROUGH;
case GT_CNS_DBL:
genSetRegToConst(targetReg, targetType, treeNode);
genProduceReg(treeNode);
break;
case GT_NOT:
case GT_NEG:
genCodeForNegNot(treeNode);
break;
case GT_BSWAP:
case GT_BSWAP16:
genCodeForBswap(treeNode);
break;
case GT_DIV:
if (varTypeIsFloating(treeNode->TypeGet()))
{
genCodeForBinary(treeNode->AsOp());
break;
}
FALLTHROUGH;
case GT_MOD:
case GT_UMOD:
case GT_UDIV:
genCodeForDivMod(treeNode->AsOp());
break;
case GT_OR:
case GT_XOR:
case GT_AND:
assert(varTypeIsIntegralOrI(treeNode));
FALLTHROUGH;
#if !defined(TARGET_64BIT)
case GT_ADD_LO:
case GT_ADD_HI:
case GT_SUB_LO:
case GT_SUB_HI:
#endif // !defined(TARGET_64BIT)
case GT_ADD:
case GT_SUB:
genCodeForBinary(treeNode->AsOp());
break;
case GT_MUL:
if (varTypeIsFloating(treeNode->TypeGet()))
{
genCodeForBinary(treeNode->AsOp());
break;
}
genCodeForMul(treeNode->AsOp());
break;
case GT_LSH:
case GT_RSH:
case GT_RSZ:
case GT_ROL:
case GT_ROR:
genCodeForShift(treeNode);
break;
#if !defined(TARGET_64BIT)
case GT_LSH_HI:
case GT_RSH_LO:
genCodeForShiftLong(treeNode);
break;
#endif // !defined(TARGET_64BIT)
case GT_CAST:
genCodeForCast(treeNode->AsOp());
break;
case GT_BITCAST:
genCodeForBitCast(treeNode->AsOp());
break;
case GT_LCL_FLD_ADDR:
case GT_LCL_VAR_ADDR:
genCodeForLclAddr(treeNode);
break;
case GT_LCL_FLD:
genCodeForLclFld(treeNode->AsLclFld());
break;
case GT_LCL_VAR:
genCodeForLclVar(treeNode->AsLclVar());
break;
case GT_STORE_LCL_FLD:
genCodeForStoreLclFld(treeNode->AsLclFld());
break;
case GT_STORE_LCL_VAR:
genCodeForStoreLclVar(treeNode->AsLclVar());
break;
case GT_RETFILT:
case GT_RETURN:
genReturn(treeNode);
break;
case GT_LEA:
// If we are here, it is the case where there is an LEA that cannot be folded into a parent instruction.
genLeaInstruction(treeNode->AsAddrMode());
break;
case GT_INDEX_ADDR:
genCodeForIndexAddr(treeNode->AsIndexAddr());
break;
case GT_IND:
genCodeForIndir(treeNode->AsIndir());
break;
case GT_INC_SATURATE:
genCodeForIncSaturate(treeNode);
break;
case GT_MULHI:
#ifdef TARGET_X86
case GT_MUL_LONG:
#endif
genCodeForMulHi(treeNode->AsOp());
break;
case GT_INTRINSIC:
genIntrinsic(treeNode);
break;
#ifdef FEATURE_SIMD
case GT_SIMD:
genSIMDIntrinsic(treeNode->AsSIMD());
break;
#endif // FEATURE_SIMD
#ifdef FEATURE_HW_INTRINSICS
case GT_HWINTRINSIC:
genHWIntrinsic(treeNode->AsHWIntrinsic());
break;
#endif // FEATURE_HW_INTRINSICS
case GT_CKFINITE:
genCkfinite(treeNode);
break;
case GT_EQ:
case GT_NE:
case GT_LT:
case GT_LE:
case GT_GE:
case GT_GT:
case GT_TEST_EQ:
case GT_TEST_NE:
case GT_CMP:
genCodeForCompare(treeNode->AsOp());
break;
case GT_JTRUE:
genCodeForJumpTrue(treeNode->AsOp());
break;
case GT_JCC:
genCodeForJcc(treeNode->AsCC());
break;
case GT_SETCC:
genCodeForSetcc(treeNode->AsCC());
break;
case GT_BT:
genCodeForBT(treeNode->AsOp());
break;
case GT_RETURNTRAP:
genCodeForReturnTrap(treeNode->AsOp());
break;
case GT_STOREIND:
genCodeForStoreInd(treeNode->AsStoreInd());
break;
case GT_COPY:
// This is handled at the time we call genConsumeReg() on the GT_COPY
break;
case GT_FIELD_LIST:
// Should always be marked contained.
assert(!"LIST, FIELD_LIST nodes should always be marked contained.");
break;
case GT_SWAP:
genCodeForSwap(treeNode->AsOp());
break;
case GT_PUTARG_STK:
genPutArgStk(treeNode->AsPutArgStk());
break;
case GT_PUTARG_REG:
genPutArgReg(treeNode->AsOp());
break;
case GT_CALL:
genCall(treeNode->AsCall());
break;
case GT_JMP:
genJmpMethod(treeNode);
break;
case GT_LOCKADD:
genCodeForLockAdd(treeNode->AsOp());
break;
case GT_XCHG:
case GT_XADD:
genLockedInstructions(treeNode->AsOp());
break;
case GT_XORR:
case GT_XAND:
NYI("Interlocked.Or and Interlocked.And aren't implemented for x86 yet.");
break;
case GT_MEMORYBARRIER:
{
CodeGen::BarrierKind barrierKind =
treeNode->gtFlags & GTF_MEMORYBARRIER_LOAD ? BARRIER_LOAD_ONLY : BARRIER_FULL;
instGen_MemoryBarrier(barrierKind);
break;
}
case GT_CMPXCHG:
genCodeForCmpXchg(treeNode->AsCmpXchg());
break;
case GT_RELOAD:
// do nothing - reload is just a marker.
// The parent node will call genConsumeReg on this which will trigger the unspill of this node's child
// into the register specified in this node.
break;
case GT_NOP:
break;
case GT_KEEPALIVE:
genConsumeRegs(treeNode->AsOp()->gtOp1);
break;
case GT_NO_OP:
GetEmitter()->emitIns_Nop(1);
break;
case GT_BOUNDS_CHECK:
genRangeCheck(treeNode);
break;
case GT_PHYSREG:
genCodeForPhysReg(treeNode->AsPhysReg());
break;
case GT_NULLCHECK:
genCodeForNullCheck(treeNode->AsIndir());
break;
case GT_CATCH_ARG:
noway_assert(handlerGetsXcptnObj(compiler->compCurBB->bbCatchTyp));
/* Catch arguments get passed in a register. genCodeForBBlist()
would have marked it as holding a GC object, but not used. */
noway_assert(gcInfo.gcRegGCrefSetCur & RBM_EXCEPTION_OBJECT);
genConsumeReg(treeNode);
break;
#if !defined(FEATURE_EH_FUNCLETS)
case GT_END_LFIN:
// Have to clear the ShadowSP of the nesting level which encloses the finally. Generates:
// mov dword ptr [ebp-0xC], 0 // for some slot of the ShadowSP local var
size_t finallyNesting;
finallyNesting = treeNode->AsVal()->gtVal1;
noway_assert(treeNode->AsVal()->gtVal1 < compiler->compHndBBtabCount);
noway_assert(finallyNesting < compiler->compHndBBtabCount);
// The last slot is reserved for ICodeManager::FixContext(ppEndRegion)
unsigned filterEndOffsetSlotOffs;
PREFIX_ASSUME(compiler->lvaLclSize(compiler->lvaShadowSPslotsVar) >
TARGET_POINTER_SIZE); // below doesn't underflow.
filterEndOffsetSlotOffs =
(unsigned)(compiler->lvaLclSize(compiler->lvaShadowSPslotsVar) - TARGET_POINTER_SIZE);
size_t curNestingSlotOffs;
curNestingSlotOffs = filterEndOffsetSlotOffs - ((finallyNesting + 1) * TARGET_POINTER_SIZE);
GetEmitter()->emitIns_S_I(INS_mov, EA_PTRSIZE, compiler->lvaShadowSPslotsVar, (unsigned)curNestingSlotOffs,
0);
break;
#endif // !FEATURE_EH_FUNCLETS
case GT_PINVOKE_PROLOG:
noway_assert(((gcInfo.gcRegGCrefSetCur | gcInfo.gcRegByrefSetCur) & ~fullIntArgRegMask()) == 0);
#ifdef PSEUDORANDOM_NOP_INSERTION
// the runtime side requires the codegen here to be consistent
emit->emitDisableRandomNops();
#endif // PSEUDORANDOM_NOP_INSERTION
break;
case GT_LABEL:
genPendingCallLabel = genCreateTempLabel();
emit->emitIns_R_L(INS_lea, EA_PTR_DSP_RELOC, genPendingCallLabel, treeNode->GetRegNum());
break;
case GT_STORE_OBJ:
case GT_STORE_DYN_BLK:
case GT_STORE_BLK:
genCodeForStoreBlk(treeNode->AsBlk());
break;
case GT_JMPTABLE:
genJumpTable(treeNode);
break;
case GT_SWITCH_TABLE:
genTableBasedSwitch(treeNode);
break;
case GT_ARR_INDEX:
genCodeForArrIndex(treeNode->AsArrIndex());
break;
case GT_ARR_OFFSET:
genCodeForArrOffset(treeNode->AsArrOffs());
break;
case GT_CLS_VAR_ADDR:
emit->emitIns_R_C(INS_lea, EA_PTRSIZE, targetReg, treeNode->AsClsVar()->gtClsVarHnd, 0);
genProduceReg(treeNode);
break;
#if !defined(TARGET_64BIT)
case GT_LONG:
assert(treeNode->isUsedFromReg());
genConsumeRegs(treeNode);
break;
#endif
case GT_IL_OFFSET:
// Do nothing; these nodes are simply markers for debug info.
break;
default:
{
#ifdef DEBUG
char message[256];
_snprintf_s(message, ArrLen(message), _TRUNCATE, "NYI: Unimplemented node type %s\n",
GenTree::OpName(treeNode->OperGet()));
NYIRAW(message);
#endif
assert(!"Unknown node in codegen");
}
break;
}
}
#ifdef FEATURE_SIMD
//----------------------------------------------------------------------------------
// genMultiRegStoreToSIMDLocal: store multi-reg value to a single-reg SIMD local
//
// Arguments:
// lclNode - GenTreeLclVar of GT_STORE_LCL_VAR
//
// Return Value:
// None
//
void CodeGen::genMultiRegStoreToSIMDLocal(GenTreeLclVar* lclNode)
{
assert(varTypeIsSIMD(lclNode));
regNumber dst = lclNode->GetRegNum();
GenTree* op1 = lclNode->gtGetOp1();
GenTree* actualOp1 = op1->gtSkipReloadOrCopy();
unsigned regCount = actualOp1->GetMultiRegCount(compiler);
assert(op1->IsMultiRegNode());
genConsumeRegs(op1);
// Right now the only enregistrable structs supported are SIMD types.
// They are only returned in 1 or 2 registers - the 1 register case is
// handled as a regular STORE_LCL_VAR.
// This case is always a call (AsCall() will assert if it is not).
GenTreeCall* call = actualOp1->AsCall();
const ReturnTypeDesc* retTypeDesc = call->GetReturnTypeDesc();
assert(retTypeDesc->GetReturnRegCount() == MAX_RET_REG_COUNT);
assert(regCount == 2);
regNumber targetReg = lclNode->GetRegNum();
regNumber reg0 = call->GetRegNumByIdx(0);
regNumber reg1 = call->GetRegNumByIdx(1);
if (op1->IsCopyOrReload())
{
// GT_COPY/GT_RELOAD will have valid reg for those positions
// that need to be copied or reloaded.
regNumber reloadReg = op1->AsCopyOrReload()->GetRegNumByIdx(0);
if (reloadReg != REG_NA)
{
reg0 = reloadReg;
}
reloadReg = op1->AsCopyOrReload()->GetRegNumByIdx(1);
if (reloadReg != REG_NA)
{
reg1 = reloadReg;
}
}
#ifdef UNIX_AMD64_ABI
assert(varTypeIsFloating(retTypeDesc->GetReturnRegType(0)));
assert(varTypeIsFloating(retTypeDesc->GetReturnRegType(1)));
// This is a case where the two 8-bytes that comprise the operand are in
// two different xmm registers and need to be assembled into a single
// xmm register.
if (targetReg != reg0 && targetReg != reg1)
{
// targetReg = reg0;
// targetReg[127:64] = reg1[127:64]
inst_Mov(TYP_DOUBLE, targetReg, reg0, /* canSkip */ false);
inst_RV_RV_IV(INS_shufpd, EA_16BYTE, targetReg, reg1, 0x00);
}
else if (targetReg == reg0)
{
// (elided) targetReg = reg0
// targetReg[127:64] = reg1[127:64]
inst_RV_RV_IV(INS_shufpd, EA_16BYTE, targetReg, reg1, 0x00);
}
else
{
assert(targetReg == reg1);
// We need two shuffles to achieve this
// First:
// targetReg[63:0] = targetReg[63:0]
// targetReg[127:64] = reg0[63:0]
//
// Second:
// targetReg[63:0] = targetReg[127:64]
// targetReg[127:64] = targetReg[63:0]
//
// Essentially, copy the low 8 bytes of reg0 into the high 8 bytes of targetReg,
// and then swap the low and high 8 bytes of targetReg so they end up
// in the right order.
inst_RV_RV_IV(INS_shufpd, EA_16BYTE, targetReg, reg0, 0x00);
inst_RV_RV_IV(INS_shufpd, EA_16BYTE, targetReg, targetReg, 0x01);
}
genProduceReg(lclNode);
#elif defined(TARGET_X86)
if (TargetOS::IsWindows)
{
assert(varTypeIsIntegral(retTypeDesc->GetReturnRegType(0)));
assert(varTypeIsIntegral(retTypeDesc->GetReturnRegType(1)));
assert(lclNode->TypeIs(TYP_SIMD8));
// This is a case where a SIMD8 struct is returned in [EAX, EDX]
// and needs to be assembled into a single xmm register.
// Note that we can't check reg0==EAX, reg1==EDX because they could already have been moved.
inst_Mov(TYP_FLOAT, targetReg, reg0, /* canSkip */ false);
const emitAttr size = emitTypeSize(TYP_SIMD8);
if (compiler->compOpportunisticallyDependsOn(InstructionSet_SSE41))
{
GetEmitter()->emitIns_SIMD_R_R_R_I(INS_pinsrd, size, targetReg, targetReg, reg1, 1);
}
else
{
regNumber tempXmm = lclNode->GetSingleTempReg();
assert(tempXmm != targetReg);
inst_Mov(TYP_FLOAT, tempXmm, reg1, /* canSkip */ false);
GetEmitter()->emitIns_SIMD_R_R_R(INS_punpckldq, size, targetReg, targetReg, tempXmm);
}
genProduceReg(lclNode);
}
#elif defined(TARGET_AMD64)
assert(!TargetOS::IsWindows || !"Multireg store to SIMD reg not supported on Windows x64");
#else
#error Unsupported or unset target architecture
#endif
}
#endif // FEATURE_SIMD
//------------------------------------------------------------------------
// genEstablishFramePointer: Set up the frame pointer by adding an offset to the stack pointer.
//
// Arguments:
// delta - the offset to add to the current stack pointer to establish the frame pointer
// reportUnwindData - true if establishing the frame pointer should be reported in the OS unwind data.
//
void CodeGen::genEstablishFramePointer(int delta, bool reportUnwindData)
{
assert(compiler->compGeneratingProlog);
if (delta == 0)
{
GetEmitter()->emitIns_Mov(INS_mov, EA_PTRSIZE, REG_FPBASE, REG_SPBASE, /* canSkip */ false);
#ifdef USING_SCOPE_INFO
psiMoveESPtoEBP();
#endif // USING_SCOPE_INFO
}
else
{
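// Non-zero delta: establish the frame pointer with a single lea, e.g. "lea rbp, [rsp + delta]"
// (or "lea ebp, [esp + delta]" on x86).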
GetEmitter()->emitIns_R_AR(INS_lea, EA_PTRSIZE, REG_FPBASE, REG_SPBASE, delta);
// We don't update prolog scope info (there is no function to handle lea), but that is currently dead code
// anyway.
}
if (reportUnwindData)
{
compiler->unwindSetFrameReg(REG_FPBASE, delta);
}
}
//------------------------------------------------------------------------
// genAllocLclFrame: Probe the stack and allocate the local stack frame - subtract from SP.
//
// Arguments:
// frameSize - the size of the stack frame being allocated.
// initReg - register to use as a scratch register.
// pInitRegZeroed - OUT parameter. *pInitRegZeroed is set to 'false' if and only if
// this call sets 'initReg' to a non-zero value.
// maskArgRegsLiveIn - incoming argument registers that are currently live.
//
// Return value:
// None
//
void CodeGen::genAllocLclFrame(unsigned frameSize, regNumber initReg, bool* pInitRegZeroed, regMaskTP maskArgRegsLiveIn)
{
assert(compiler->compGeneratingProlog);
if (frameSize == 0)
{
return;
}
const target_size_t pageSize = compiler->eeGetPageSize();
if (frameSize == REGSIZE_BYTES)
{
// Frame size is the same as register size.
GetEmitter()->emitIns_R(INS_push, EA_PTRSIZE, REG_EAX);
compiler->unwindAllocStack(frameSize);
}
else if (frameSize < pageSize)
{
GetEmitter()->emitIns_R_I(INS_sub, EA_PTRSIZE, REG_SPBASE, frameSize);
compiler->unwindAllocStack(frameSize);
const unsigned lastProbedLocToFinalSp = frameSize;
if (lastProbedLocToFinalSp + STACK_PROBE_BOUNDARY_THRESHOLD_BYTES > pageSize)
{
// Nearly a complete page below the last probed location is unprobed. If the next action on the stack might subtract from SP
// first, before touching the current SP, then we need to probe at the very bottom. This can
// happen on x86, for example, when we copy an argument to the stack using a "SUB ESP; REP MOV"
// strategy.
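// The "test" below only reads [SP+0]; it writes no register or memory operand (only flags),
// so it serves purely to touch the lowest newly allocated page.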
GetEmitter()->emitIns_R_AR(INS_test, EA_4BYTE, REG_EAX, REG_SPBASE, 0);
}
}
else
{
#ifdef TARGET_X86
int spOffset = -(int)frameSize;
if (compiler->info.compPublishStubParam)
{
GetEmitter()->emitIns_R(INS_push, EA_PTRSIZE, REG_SECRET_STUB_PARAM);
spOffset += REGSIZE_BYTES;
}
GetEmitter()->emitIns_R_AR(INS_lea, EA_PTRSIZE, REG_STACK_PROBE_HELPER_ARG, REG_SPBASE, spOffset);
regSet.verifyRegUsed(REG_STACK_PROBE_HELPER_ARG);
genEmitHelperCall(CORINFO_HELP_STACK_PROBE, 0, EA_UNKNOWN);
if (compiler->info.compPublishStubParam)
{
GetEmitter()->emitIns_R(INS_pop, EA_PTRSIZE, REG_SECRET_STUB_PARAM);
GetEmitter()->emitIns_R_I(INS_sub, EA_PTRSIZE, REG_SPBASE, frameSize);
}
else
{
GetEmitter()->emitIns_Mov(INS_mov, EA_PTRSIZE, REG_SPBASE, REG_STACK_PROBE_HELPER_ARG, /* canSkip */ false);
}
#else // !TARGET_X86
static_assert_no_msg((RBM_STACK_PROBE_HELPER_ARG & (RBM_SECRET_STUB_PARAM | RBM_DEFAULT_HELPER_CALL_TARGET)) ==
RBM_NONE);
GetEmitter()->emitIns_R_AR(INS_lea, EA_PTRSIZE, REG_STACK_PROBE_HELPER_ARG, REG_SPBASE, -(int)frameSize);
regSet.verifyRegUsed(REG_STACK_PROBE_HELPER_ARG);
genEmitHelperCall(CORINFO_HELP_STACK_PROBE, 0, EA_UNKNOWN);
if (initReg == REG_DEFAULT_HELPER_CALL_TARGET)
{
*pInitRegZeroed = false;
}
static_assert_no_msg((RBM_STACK_PROBE_HELPER_TRASH & RBM_STACK_PROBE_HELPER_ARG) == RBM_NONE);
GetEmitter()->emitIns_Mov(INS_mov, EA_PTRSIZE, REG_SPBASE, REG_STACK_PROBE_HELPER_ARG, /* canSkip */ false);
#endif // !TARGET_X86
compiler->unwindAllocStack(frameSize);
if (initReg == REG_STACK_PROBE_HELPER_ARG)
{
*pInitRegZeroed = false;
}
}
#ifdef USING_SCOPE_INFO
if (!doubleAlignOrFramePointerUsed())
{
psiAdjustStackLevel(frameSize);
}
#endif // USING_SCOPE_INFO
}
//------------------------------------------------------------------------
// genStackPointerConstantAdjustment: add a specified constant value to the stack pointer.
// No probe is done.
//
// Arguments:
// spDelta - the value to add to SP. Must be negative or zero.
// regTmp - x86 only: an available temporary register. If not REG_NA, hide the SP
// adjustment from the emitter, using this register.
//
// Return Value:
// None.
//
void CodeGen::genStackPointerConstantAdjustment(ssize_t spDelta, regNumber regTmp)
{
assert(spDelta < 0);
// We assert that the SP change is less than one page. If it's greater, you should have called a
// function that does a probe, which will in turn call this function.
assert((target_size_t)(-spDelta) <= compiler->eeGetPageSize());
#ifdef TARGET_X86
if (regTmp != REG_NA)
{
// For x86, some cases don't want to use "sub ESP" because we don't want the emitter to track the adjustment
// to ESP. So do the work in the count register.
// TODO-CQ: manipulate ESP directly, to share code, reduce #ifdefs, and improve CQ. This would require
// creating a way to temporarily turn off the emitter's tracking of ESP, maybe marking instrDescs as "don't
// track".
inst_Mov(TYP_I_IMPL, regTmp, REG_SPBASE, /* canSkip */ false);
inst_RV_IV(INS_sub, regTmp, (target_ssize_t)-spDelta, EA_PTRSIZE);
inst_Mov(TYP_I_IMPL, REG_SPBASE, regTmp, /* canSkip */ false);
}
else
#endif // TARGET_X86
{
inst_RV_IV(INS_sub, REG_SPBASE, (target_ssize_t)-spDelta, EA_PTRSIZE);
}
}
//------------------------------------------------------------------------
// genStackPointerConstantAdjustmentWithProbe: add a specified constant value to the stack pointer,
// and probe the stack as appropriate. Should only be called as a helper for
// genStackPointerConstantAdjustmentLoopWithProbe.
//
// Arguments:
// spDelta - the value to add to SP. Must be negative or zero. If zero, the probe happens,
// but the stack pointer doesn't move.
// regTmp - x86 only: an available temporary register. If not REG_NA, hide the SP
// adjustment from the emitter, using this register.
//
// Return Value:
// None.
//
void CodeGen::genStackPointerConstantAdjustmentWithProbe(ssize_t spDelta, regNumber regTmp)
{
GetEmitter()->emitIns_AR_R(INS_TEST, EA_4BYTE, REG_SPBASE, REG_SPBASE, 0);
genStackPointerConstantAdjustment(spDelta, regTmp);
}
//------------------------------------------------------------------------
// genStackPointerConstantAdjustmentLoopWithProbe: Add a specified constant value to the stack pointer,
// and probe the stack as appropriate. Generates one probe per page, up to the total amount required.
// This will generate a sequence of probes in-line. It is required for the case where we need to expose
// (not hide) the stack level adjustment. We can't use the dynamic loop in that case, because the total
// stack adjustment would not be visible to the emitter. It would be possible to use this version for
// multiple hidden constant stack level adjustments but we don't do that currently (we use the loop
// version in genStackPointerDynamicAdjustmentWithProbe instead).
//
// Arguments:
// spDelta - the value to add to SP. Must be negative.
// regTmp - x86 only: an available temporary register. If not REG_NA, hide the SP
// adjustment from the emitter, using this register.
//
// Return Value:
// Offset in bytes from SP to last probed address.
//
target_ssize_t CodeGen::genStackPointerConstantAdjustmentLoopWithProbe(ssize_t spDelta, regNumber regTmp)
{
assert(spDelta < 0);
const target_size_t pageSize = compiler->eeGetPageSize();
ssize_t spRemainingDelta = spDelta;
do
{
ssize_t spOneDelta = -(ssize_t)min((target_size_t)-spRemainingDelta, pageSize);
genStackPointerConstantAdjustmentWithProbe(spOneDelta, regTmp);
spRemainingDelta -= spOneDelta;
} while (spRemainingDelta < 0);
// What offset from the final SP was the last probe? This depends on the fact that
// genStackPointerConstantAdjustmentWithProbe() probes first, then does "SUB SP".
target_size_t lastTouchDelta = (target_size_t)(-spDelta) % pageSize;
if ((lastTouchDelta == 0) || (lastTouchDelta + STACK_PROBE_BOUNDARY_THRESHOLD_BYTES > pageSize))
{
// Nearly a complete page below the last probe is unprobed. If lastTouchDelta==0, then spDelta was an exact
// multiple of pageSize, which means we last probed exactly one page back. Otherwise, we probed
// the page, but very far from the end. If the next action on the stack might subtract from SP
// first, before touching the current SP, then we do one more probe at the very bottom. This can
// happen on x86, for example, when we copy an argument to the stack using a "SUB ESP; REP MOV"
// strategy.
GetEmitter()->emitIns_AR_R(INS_test, EA_PTRSIZE, REG_EAX, REG_SPBASE, 0);
lastTouchDelta = 0;
}
return lastTouchDelta;
}
//------------------------------------------------------------------------
// genStackPointerDynamicAdjustmentWithProbe: add a register value to the stack pointer,
// and probe the stack as appropriate.
//
// Note that for x86, we hide the ESP adjustment from the emitter. To do that, currently,
// requires a temporary register and extra code.
//
// Arguments:
// regSpDelta - the register value to add to SP. The value in this register must be negative.
// This register might be trashed.
// regTmp - an available temporary register. Will be trashed.
//
// Return Value:
// None.
//
void CodeGen::genStackPointerDynamicAdjustmentWithProbe(regNumber regSpDelta, regNumber regTmp)
{
assert(regSpDelta != REG_NA);
assert(regTmp != REG_NA);
// Tickle the pages to ensure that ESP is always valid and is
// in sync with the "stack guard page". Note that in the worst
// case ESP is on the last byte of the guard page. Thus you must
// touch ESP-0 first not ESP-0x1000.
//
// Another subtlety is that you don't want ESP to be exactly on the
// boundary of the guard page because PUSH is predecrement, thus
// call setup would not touch the guard page but just beyond it.
//
// Note that we go through a few hoops so that ESP never points to
// illegal pages at any time during the tickling process
//
// add regSpDelta, ESP // reg now holds ultimate ESP
// jb loop // result is smaller than original ESP (no wrap around)
// xor regSpDelta, regSpDelta // Overflow, pick lowest possible number
// loop:
// test ESP, [ESP+0] // tickle the page
// mov regTmp, ESP
// sub regTmp, eeGetPageSize()
// mov ESP, regTmp
// cmp ESP, regSpDelta
// jae loop
// mov ESP, regSpDelta
BasicBlock* loop = genCreateTempLabel();
inst_RV_RV(INS_add, regSpDelta, REG_SPBASE, TYP_I_IMPL);
inst_JMP(EJ_jb, loop);
instGen_Set_Reg_To_Zero(EA_PTRSIZE, regSpDelta);
genDefineTempLabel(loop);
// Tickle the decremented value. Note that it must be done BEFORE the update of ESP since ESP might already
// be on the guard page. It is OK to leave the final value of ESP on the guard page.
GetEmitter()->emitIns_AR_R(INS_TEST, EA_4BYTE, REG_SPBASE, REG_SPBASE, 0);
// Subtract a page from ESP. This is a trick to avoid the emitter trying to track the
// decrement of the ESP - we do the subtraction in another reg instead of adjusting ESP directly.
inst_Mov(TYP_I_IMPL, regTmp, REG_SPBASE, /* canSkip */ false);
inst_RV_IV(INS_sub, regTmp, compiler->eeGetPageSize(), EA_PTRSIZE);
inst_Mov(TYP_I_IMPL, REG_SPBASE, regTmp, /* canSkip */ false);
inst_RV_RV(INS_cmp, REG_SPBASE, regSpDelta, TYP_I_IMPL);
inst_JMP(EJ_jae, loop);
// Move the final value to ESP
inst_Mov(TYP_I_IMPL, REG_SPBASE, regSpDelta, /* canSkip */ false);
}
//------------------------------------------------------------------------
// genLclHeap: Generate code for localloc.
//
// Arguments:
// tree - the localloc tree to generate.
//
// Notes:
// Note that for x86, we don't track ESP movements while generating the localloc code.
// The ESP tracking is used to report stack pointer-relative GC info, which is not
// interesting while doing the localloc construction. Also, for functions with localloc,
// we have EBP frames, and EBP-relative locals, and ESP-relative accesses only for function
// call arguments.
//
// For x86, we store the ESP after the localloc is complete in the LocAllocSP
// variable. This variable is implicitly reported to the VM in the GC info (its position
// is defined by convention relative to other items), and is used by the GC to find the
// "base" stack pointer in functions with localloc.
//
void CodeGen::genLclHeap(GenTree* tree)
{
assert(tree->OperGet() == GT_LCLHEAP);
assert(compiler->compLocallocUsed);
GenTree* size = tree->AsOp()->gtOp1;
noway_assert((genActualType(size->gtType) == TYP_INT) || (genActualType(size->gtType) == TYP_I_IMPL));
regNumber targetReg = tree->GetRegNum();
regNumber regCnt = REG_NA;
var_types type = genActualType(size->gtType);
emitAttr easz = emitTypeSize(type);
BasicBlock* endLabel = nullptr;
target_ssize_t lastTouchDelta = (target_ssize_t)-1;
#ifdef DEBUG
genStackPointerCheck(compiler->opts.compStackCheckOnRet, compiler->lvaReturnSpCheck);
#endif
noway_assert(isFramePointerUsed()); // localloc requires Frame Pointer to be established since SP changes
noway_assert(genStackLevel == 0); // Can't have anything on the stack
target_size_t stackAdjustment = 0;
target_size_t locAllocStackOffset = 0;
// compute the amount of memory to allocate to properly STACK_ALIGN.
size_t amount = 0;
if (size->IsCnsIntOrI())
{
// If size is a constant, then it must be contained.
assert(size->isContained());
// If amount is zero then return null in targetReg
amount = size->AsIntCon()->gtIconVal;
if (amount == 0)
{
instGen_Set_Reg_To_Zero(EA_PTRSIZE, targetReg);
goto BAILOUT;
}
// Round 'amount', the total number of bytes to localloc, up to a STACK_ALIGN boundary.
amount = AlignUp(amount, STACK_ALIGN);
}
else
{
// The localloc requested memory size is non-constant.
// Put the size value in targetReg. If it is zero, bail out by returning null in targetReg.
genConsumeRegAndCopy(size, targetReg);
endLabel = genCreateTempLabel();
GetEmitter()->emitIns_R_R(INS_test, easz, targetReg, targetReg);
inst_JMP(EJ_je, endLabel);
// Compute the size of the block to allocate and perform alignment.
// If compInitMem=true, we can reuse targetReg as regcnt,
// since we don't need any internal registers.
if (compiler->info.compInitMem)
{
assert(tree->AvailableTempRegCount() == 0);
regCnt = targetReg;
}
else
{
regCnt = tree->ExtractTempReg();
// Above, we put the size in targetReg. Now, copy it to our new temp register if necessary.
inst_Mov(size->TypeGet(), regCnt, targetReg, /* canSkip */ true);
}
// Round up the number of bytes to allocate to a STACK_ALIGN boundary. This is done
// by code like:
// add reg, 15
// and reg, -16
// However, in the initialized memory case, we need the count of STACK_ALIGN-sized
// elements, not a byte count, after the alignment. So instead of the "and", which
// becomes unnecessary, generate a shift, e.g.:
// add reg, 15
// shr reg, 4
inst_RV_IV(INS_add, regCnt, STACK_ALIGN - 1, emitActualTypeSize(type));
if (compiler->info.compInitMem)
{
// Convert the count from a count of bytes to a loop count. We will loop once per
// stack alignment size, so each loop will zero 4 bytes on Windows/x86, and 16 bytes
// on x64 and Linux/x86.
//
// Note that we zero a single reg-size word per iteration on x86, and 2 reg-size
// words per iteration on x64. We will shift off all the stack alignment bits
// added above, so there is no need for an 'and' instruction.
// --- shr regCnt, 2 (or 4) ---
inst_RV_SH(INS_SHIFT_RIGHT_LOGICAL, EA_PTRSIZE, regCnt, STACK_ALIGN_SHIFT);
}
else
{
// Otherwise, mask off the low bits to align the byte count.
inst_RV_IV(INS_AND, regCnt, ~(STACK_ALIGN - 1), emitActualTypeSize(type));
}
}
bool initMemOrLargeAlloc; // Declaration must be separate from initialization to avoid clang compiler error.
initMemOrLargeAlloc = compiler->info.compInitMem || (amount >= compiler->eeGetPageSize()); // must be >= not >
#if FEATURE_FIXED_OUT_ARGS
// If we have an outgoing arg area then we must adjust the SP by popping off the
// outgoing arg area. We will restore it right before we return from this method.
//
// Localloc returns stack space that aligned to STACK_ALIGN bytes. The following
// are the cases that need to be handled:
// i) Method has out-going arg area.
// It is guaranteed that size of out-going arg area is STACK_ALIGN'ed (see fgMorphArgs).
// Therefore, we will pop off the out-going arg area from RSP before allocating the localloc space.
// ii) Method has no out-going arg area.
// Nothing to pop off from the stack.
if (compiler->lvaOutgoingArgSpaceSize > 0)
{
assert((compiler->lvaOutgoingArgSpaceSize % STACK_ALIGN) == 0); // This must be true for the stack to remain
// aligned
// If the localloc amount is a small enough constant, and we're not initializing the allocated
// memory, then don't bother popping off the outgoing arg space first; just allocate the amount
// of space needed by the allocation, and call the bottom part the new outgoing arg space.
if ((amount > 0) && !initMemOrLargeAlloc)
{
lastTouchDelta = genStackPointerConstantAdjustmentLoopWithProbe(-(ssize_t)amount, REG_NA);
stackAdjustment = 0;
locAllocStackOffset = (target_size_t)compiler->lvaOutgoingArgSpaceSize;
goto ALLOC_DONE;
}
inst_RV_IV(INS_add, REG_SPBASE, compiler->lvaOutgoingArgSpaceSize, EA_PTRSIZE);
stackAdjustment += (target_size_t)compiler->lvaOutgoingArgSpaceSize;
locAllocStackOffset = stackAdjustment;
}
#endif
if (size->IsCnsIntOrI())
{
// We should reach here only for non-zero, constant size allocations.
assert(amount > 0);
assert((amount % STACK_ALIGN) == 0);
assert((amount % REGSIZE_BYTES) == 0);
// For small allocations we will generate up to six 'push 0' instructions inline
size_t cntRegSizedWords = amount / REGSIZE_BYTES;
if (compiler->info.compInitMem && (cntRegSizedWords <= 6))
{
for (; cntRegSizedWords != 0; cntRegSizedWords--)
{
inst_IV(INS_push_hide, 0); // push_hide means don't track the stack
}
lastTouchDelta = 0;
goto ALLOC_DONE;
}
#ifdef TARGET_X86
bool needRegCntRegister = true;
#else // !TARGET_X86
bool needRegCntRegister = initMemOrLargeAlloc;
#endif // !TARGET_X86
if (needRegCntRegister)
{
// If compInitMem=true, we can reuse targetReg as regcnt.
// Since size is a constant, regCnt is not yet initialized.
assert(regCnt == REG_NA);
if (compiler->info.compInitMem)
{
assert(tree->AvailableTempRegCount() == 0);
regCnt = targetReg;
}
else
{
regCnt = tree->ExtractTempReg();
}
}
if (!initMemOrLargeAlloc)
{
// Since the size is less than a page, and we don't need to zero init memory, simply adjust ESP.
// ESP might already be in the guard page, so we must touch it BEFORE
// the alloc, not after.
assert(amount < compiler->eeGetPageSize()); // must be < not <=
lastTouchDelta = genStackPointerConstantAdjustmentLoopWithProbe(-(ssize_t)amount, regCnt);
goto ALLOC_DONE;
}
// else, "mov regCnt, amount"
if (compiler->info.compInitMem)
{
// When initializing memory, we want 'amount' to be the loop count.
assert((amount % STACK_ALIGN) == 0);
amount /= STACK_ALIGN;
}
instGen_Set_Reg_To_Imm(((size_t)(int)amount == amount) ? EA_4BYTE : EA_8BYTE, regCnt, amount);
}
if (compiler->info.compInitMem)
{
// At this point 'regCnt' is set to the number of loop iterations for this loop, if each
// iteration zeros (and subtracts from the stack pointer) STACK_ALIGN bytes.
// Since we have to zero out the allocated memory AND ensure that RSP is always valid
// by tickling the pages, we will just push 0's on the stack.
assert(genIsValidIntReg(regCnt));
// Loop:
BasicBlock* loop = genCreateTempLabel();
genDefineTempLabel(loop);
static_assert_no_msg((STACK_ALIGN % REGSIZE_BYTES) == 0);
unsigned const count = (STACK_ALIGN / REGSIZE_BYTES);
for (unsigned i = 0; i < count; i++)
{
inst_IV(INS_push_hide, 0); // --- push REG_SIZE bytes of 0
}
// Note that the stack must always be aligned to STACK_ALIGN bytes
// Decrement the loop counter and loop if not done.
inst_RV(INS_dec, regCnt, TYP_I_IMPL);
inst_JMP(EJ_jne, loop);
lastTouchDelta = 0;
}
else
{
// At this point 'regCnt' is set to the total number of bytes to localloc.
// Negate this value before calling the function to adjust the stack (which
// adds to ESP).
inst_RV(INS_NEG, regCnt, TYP_I_IMPL);
regNumber regTmp = tree->GetSingleTempReg();
genStackPointerDynamicAdjustmentWithProbe(regCnt, regTmp);
// lastTouchDelta is dynamic, and can be up to a page. So if we have outgoing arg space,
// we're going to assume the worst and probe.
}
ALLOC_DONE:
// Re-adjust SP to allocate out-going arg area. Note: this also requires probes, if we have
// a very large stack adjustment! For simplicity, we use the same function used elsewhere,
// which probes the current address before subtracting. We may end up probing multiple
// times relatively "nearby".
if (stackAdjustment > 0)
{
assert((stackAdjustment % STACK_ALIGN) == 0); // This must be true for the stack to remain aligned
assert(lastTouchDelta >= -1);
if ((lastTouchDelta == (target_ssize_t)-1) ||
(stackAdjustment + (target_size_t)lastTouchDelta + STACK_PROBE_BOUNDARY_THRESHOLD_BYTES >
compiler->eeGetPageSize()))
{
genStackPointerConstantAdjustmentLoopWithProbe(-(ssize_t)stackAdjustment, REG_NA);
}
else
{
genStackPointerConstantAdjustment(-(ssize_t)stackAdjustment, REG_NA);
}
}
// Return the stackalloc'ed address in result register.
// TargetReg = RSP + locAllocStackOffset
GetEmitter()->emitIns_R_AR(INS_lea, EA_PTRSIZE, targetReg, REG_SPBASE, (int)locAllocStackOffset);
if (endLabel != nullptr)
{
genDefineTempLabel(endLabel);
}
BAILOUT:
#ifdef JIT32_GCENCODER
if (compiler->lvaLocAllocSPvar != BAD_VAR_NUM)
{
GetEmitter()->emitIns_S_R(ins_Store(TYP_I_IMPL), EA_PTRSIZE, REG_SPBASE, compiler->lvaLocAllocSPvar, 0);
}
#endif // JIT32_GCENCODER
#ifdef DEBUG
// Update local variable to reflect the new stack pointer.
if (compiler->opts.compStackCheckOnRet)
{
noway_assert(compiler->lvaReturnSpCheck != 0xCCCCCCCC &&
compiler->lvaGetDesc(compiler->lvaReturnSpCheck)->lvDoNotEnregister &&
compiler->lvaGetDesc(compiler->lvaReturnSpCheck)->lvOnFrame);
GetEmitter()->emitIns_S_R(ins_Store(TYP_I_IMPL), EA_PTRSIZE, REG_SPBASE, compiler->lvaReturnSpCheck, 0);
}
#endif
genProduceReg(tree);
}
void CodeGen::genCodeForStoreBlk(GenTreeBlk* storeBlkNode)
{
assert(storeBlkNode->OperIs(GT_STORE_OBJ, GT_STORE_DYN_BLK, GT_STORE_BLK));
if (storeBlkNode->OperIs(GT_STORE_OBJ))
{
#ifndef JIT32_GCENCODER
assert(!storeBlkNode->gtBlkOpGcUnsafe);
#endif
assert(storeBlkNode->OperIsCopyBlkOp());
assert(storeBlkNode->AsObj()->GetLayout()->HasGCPtr());
genCodeForCpObj(storeBlkNode->AsObj());
return;
}
bool isCopyBlk = storeBlkNode->OperIsCopyBlkOp();
switch (storeBlkNode->gtBlkOpKind)
{
#ifdef TARGET_AMD64
case GenTreeBlk::BlkOpKindHelper:
assert(!storeBlkNode->gtBlkOpGcUnsafe);
if (isCopyBlk)
{
genCodeForCpBlkHelper(storeBlkNode);
}
else
{
genCodeForInitBlkHelper(storeBlkNode);
}
break;
#endif // TARGET_AMD64
case GenTreeBlk::BlkOpKindRepInstr:
#ifndef JIT32_GCENCODER
assert(!storeBlkNode->gtBlkOpGcUnsafe);
#endif
if (isCopyBlk)
{
genCodeForCpBlkRepMovs(storeBlkNode);
}
else
{
genCodeForInitBlkRepStos(storeBlkNode);
}
break;
case GenTreeBlk::BlkOpKindUnroll:
if (isCopyBlk)
{
#ifndef JIT32_GCENCODER
if (storeBlkNode->gtBlkOpGcUnsafe)
{
GetEmitter()->emitDisableGC();
}
#endif
genCodeForCpBlkUnroll(storeBlkNode);
#ifndef JIT32_GCENCODER
if (storeBlkNode->gtBlkOpGcUnsafe)
{
GetEmitter()->emitEnableGC();
}
#endif
}
else
{
#ifndef JIT32_GCENCODER
assert(!storeBlkNode->gtBlkOpGcUnsafe);
#endif
genCodeForInitBlkUnroll(storeBlkNode);
}
break;
default:
unreached();
}
}
//------------------------------------------------------------------------
// genCodeForInitBlkRepStos: Generate code for InitBlk using rep stos.
//
// Arguments:
// initBlkNode - The Block store for which we are generating code.
//
void CodeGen::genCodeForInitBlkRepStos(GenTreeBlk* initBlkNode)
{
genConsumeBlockOp(initBlkNode, REG_RDI, REG_RAX, REG_RCX);
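// genConsumeBlockOp above placed the destination address in RDI, the fill value in RAX (AL),
// and the byte count in RCX; "rep stosb" then stores AL to [RDI] RCX times, advancing RDI
// upward (the direction flag is clear per the ABI).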
instGen(INS_r_stosb);
}
//----------------------------------------------------------------------------------
// genCodeForInitBlkUnroll: Generate unrolled block initialization code.
//
// Arguments:
// node - the GT_STORE_BLK node to generate code for
//
void CodeGen::genCodeForInitBlkUnroll(GenTreeBlk* node)
{
assert(node->OperIs(GT_STORE_BLK));
unsigned dstLclNum = BAD_VAR_NUM;
regNumber dstAddrBaseReg = REG_NA;
regNumber dstAddrIndexReg = REG_NA;
unsigned dstAddrIndexScale = 1;
int dstOffset = 0;
GenTree* dstAddr = node->Addr();
if (!dstAddr->isContained())
{
dstAddrBaseReg = genConsumeReg(dstAddr);
}
else if (dstAddr->OperIsAddrMode())
{
GenTreeAddrMode* addrMode = dstAddr->AsAddrMode();
if (addrMode->HasBase())
{
dstAddrBaseReg = genConsumeReg(addrMode->Base());
}
if (addrMode->HasIndex())
{
dstAddrIndexReg = genConsumeReg(addrMode->Index());
dstAddrIndexScale = addrMode->GetScale();
}
dstOffset = addrMode->Offset();
}
else
{
assert(dstAddr->OperIsLocalAddr());
dstLclNum = dstAddr->AsLclVarCommon()->GetLclNum();
dstOffset = dstAddr->AsLclVarCommon()->GetLclOffs();
}
regNumber srcIntReg = REG_NA;
GenTree* src = node->Data();
if (src->OperIs(GT_INIT_VAL))
{
assert(src->isContained());
src = src->AsUnOp()->gtGetOp1();
}
unsigned size = node->GetLayout()->GetSize();
// An SSE mov that accesses data larger than 8 bytes may be implemented using
// multiple memory accesses. Hence, the JIT must not use such stores when
// INITBLK zeroes a struct that contains GC pointers and can be observed by
// other threads (i.e. when dstAddr is not an address of a local).
// For example, this can happen when initializing a struct field of an object.
const bool canUse16BytesSimdMov = !node->IsOnHeapAndContainsReferences();
#ifdef TARGET_AMD64
// On Amd64 the JIT will not use SIMD stores for such structs and instead
// will always allocate a GP register for src node.
const bool willUseSimdMov = canUse16BytesSimdMov && (size >= XMM_REGSIZE_BYTES);
#else
// On X86 the JIT will use movq for structs that are 16 bytes or larger
// since it is more beneficial than using two mov-s from a GP register.
const bool willUseSimdMov = (size >= 16);
#endif
if (!src->isContained())
{
srcIntReg = genConsumeReg(src);
}
else
{
// If src is contained then it must be 0.
assert(src->IsIntegralConst(0));
assert(willUseSimdMov);
#ifdef TARGET_AMD64
assert(size >= XMM_REGSIZE_BYTES);
#else
assert(size % 8 == 0);
#endif
}
emitter* emit = GetEmitter();
assert(size <= INT32_MAX);
assert(dstOffset < (INT32_MAX - static_cast<int>(size)));
if (willUseSimdMov)
{
regNumber srcXmmReg = node->GetSingleTempReg(RBM_ALLFLOAT);
unsigned regSize = (size >= YMM_REGSIZE_BYTES) && compiler->compOpportunisticallyDependsOn(InstructionSet_AVX)
? YMM_REGSIZE_BYTES
: XMM_REGSIZE_BYTES;
if (src->gtSkipReloadOrCopy()->IsIntegralConst(0))
{
// If the source is constant 0 then always use xorps, it's faster
// than copying the constant from a GPR to a XMM register.
emit->emitIns_R_R(INS_xorps, EA_ATTR(regSize), srcXmmReg, srcXmmReg);
}
else
{
emit->emitIns_Mov(INS_movd, EA_PTRSIZE, srcXmmReg, srcIntReg, /* canSkip */ false);
emit->emitIns_R_R(INS_punpckldq, EA_16BYTE, srcXmmReg, srcXmmReg);
#ifdef TARGET_X86
// For x86, we need one more to convert it from 8 bytes to 16 bytes.
emit->emitIns_R_R(INS_punpckldq, EA_16BYTE, srcXmmReg, srcXmmReg);
#endif
if (regSize == YMM_REGSIZE_BYTES)
{
// Extend the bytes in the lower lanes to the upper lanes
emit->emitIns_R_R_R_I(INS_vinsertf128, EA_32BYTE, srcXmmReg, srcXmmReg, srcXmmReg, 1);
}
}
instruction simdMov = simdUnalignedMovIns();
unsigned bytesWritten = 0;
while (bytesWritten < size)
{
#ifdef TARGET_X86
if (!canUse16BytesSimdMov || (bytesWritten + regSize > size))
{
simdMov = INS_movq;
regSize = 8;
}
#endif
if (bytesWritten + regSize > size)
{
assert(srcIntReg != REG_NA);
break;
}
if (dstLclNum != BAD_VAR_NUM)
{
emit->emitIns_S_R(simdMov, EA_ATTR(regSize), srcXmmReg, dstLclNum, dstOffset);
}
else
{
emit->emitIns_ARX_R(simdMov, EA_ATTR(regSize), srcXmmReg, dstAddrBaseReg, dstAddrIndexReg,
dstAddrIndexScale, dstOffset);
}
dstOffset += regSize;
bytesWritten += regSize;
if (regSize == YMM_REGSIZE_BYTES && size - bytesWritten < YMM_REGSIZE_BYTES)
{
regSize = XMM_REGSIZE_BYTES;
}
}
size -= bytesWritten;
}
// Fill the remainder using normal stores.
#ifdef TARGET_AMD64
unsigned regSize = REGSIZE_BYTES;
while (regSize > size)
{
regSize /= 2;
}
for (; size > regSize; size -= regSize, dstOffset += regSize)
{
if (dstLclNum != BAD_VAR_NUM)
{
emit->emitIns_S_R(INS_mov, EA_ATTR(regSize), srcIntReg, dstLclNum, dstOffset);
}
else
{
emit->emitIns_ARX_R(INS_mov, EA_ATTR(regSize), srcIntReg, dstAddrBaseReg, dstAddrIndexReg,
dstAddrIndexScale, dstOffset);
}
}
if (size > 0)
{
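// Handle the tail by backing dstOffset up so that one final regSize-wide store ends exactly
// at the end of the block; the bytes it overlaps were already written with the same fill value.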
unsigned shiftBack = regSize - size;
assert(shiftBack <= regSize);
dstOffset -= shiftBack;
if (dstLclNum != BAD_VAR_NUM)
{
emit->emitIns_S_R(INS_mov, EA_ATTR(regSize), srcIntReg, dstLclNum, dstOffset);
}
else
{
emit->emitIns_ARX_R(INS_mov, EA_ATTR(regSize), srcIntReg, dstAddrBaseReg, dstAddrIndexReg,
dstAddrIndexScale, dstOffset);
}
}
#else // TARGET_X86
for (unsigned regSize = REGSIZE_BYTES; size > 0; size -= regSize, dstOffset += regSize)
{
while (regSize > size)
{
regSize /= 2;
}
if (dstLclNum != BAD_VAR_NUM)
{
emit->emitIns_S_R(INS_mov, EA_ATTR(regSize), srcIntReg, dstLclNum, dstOffset);
}
else
{
emit->emitIns_ARX_R(INS_mov, EA_ATTR(regSize), srcIntReg, dstAddrBaseReg, dstAddrIndexReg,
dstAddrIndexScale, dstOffset);
}
}
#endif
}
#ifdef TARGET_AMD64
//------------------------------------------------------------------------
// genCodeForInitBlkHelper - Generate code for an InitBlk node by the means of the VM memcpy helper call
//
// Arguments:
// initBlkNode - the GT_STORE_[BLK|OBJ|DYN_BLK]
//
// Preconditions:
// The register assignments have been set appropriately.
// This is validated by genConsumeBlockOp().
//
void CodeGen::genCodeForInitBlkHelper(GenTreeBlk* initBlkNode)
{
// Destination address goes in arg0, the fill value goes in arg1, and size goes in arg2.
// genConsumeBlockOp takes care of this for us.
genConsumeBlockOp(initBlkNode, REG_ARG_0, REG_ARG_1, REG_ARG_2);
genEmitHelperCall(CORINFO_HELP_MEMSET, 0, EA_UNKNOWN);
}
#endif // TARGET_AMD64
#ifdef FEATURE_PUT_STRUCT_ARG_STK
// Generate code for a load from some address + offset
// baseNode: tree node which can be either a local address or arbitrary node
// offset: distance from the baseNode from which to load
void CodeGen::genCodeForLoadOffset(instruction ins, emitAttr size, regNumber dst, GenTree* baseNode, unsigned offset)
{
emitter* emit = GetEmitter();
if (baseNode->OperIsLocalAddr())
{
const GenTreeLclVarCommon* lclVar = baseNode->AsLclVarCommon();
offset += lclVar->GetLclOffs();
emit->emitIns_R_S(ins, size, dst, lclVar->GetLclNum(), offset);
}
else
{
emit->emitIns_R_AR(ins, size, dst, baseNode->GetRegNum(), offset);
}
}
#endif // FEATURE_PUT_STRUCT_ARG_STK
//----------------------------------------------------------------------------------
// genCodeForCpBlkUnroll - Generate unrolled block copy code.
//
// Arguments:
// node - the GT_STORE_BLK node to generate code for
//
void CodeGen::genCodeForCpBlkUnroll(GenTreeBlk* node)
{
assert(node->OperIs(GT_STORE_BLK));
unsigned dstLclNum = BAD_VAR_NUM;
regNumber dstAddrBaseReg = REG_NA;
regNumber dstAddrIndexReg = REG_NA;
unsigned dstAddrIndexScale = 1;
int dstOffset = 0;
GenTree* dstAddr = node->Addr();
if (!dstAddr->isContained())
{
dstAddrBaseReg = genConsumeReg(dstAddr);
}
else if (dstAddr->OperIsAddrMode())
{
GenTreeAddrMode* addrMode = dstAddr->AsAddrMode();
if (addrMode->HasBase())
{
dstAddrBaseReg = genConsumeReg(addrMode->Base());
}
if (addrMode->HasIndex())
{
dstAddrIndexReg = genConsumeReg(addrMode->Index());
dstAddrIndexScale = addrMode->GetScale();
}
dstOffset = addrMode->Offset();
}
else
{
assert(dstAddr->OperIsLocalAddr());
const GenTreeLclVarCommon* lclVar = dstAddr->AsLclVarCommon();
dstLclNum = lclVar->GetLclNum();
dstOffset = lclVar->GetLclOffs();
}
unsigned srcLclNum = BAD_VAR_NUM;
regNumber srcAddrBaseReg = REG_NA;
regNumber srcAddrIndexReg = REG_NA;
unsigned srcAddrIndexScale = 1;
int srcOffset = 0;
GenTree* src = node->Data();
assert(src->isContained());
if (src->OperIs(GT_LCL_VAR, GT_LCL_FLD))
{
srcLclNum = src->AsLclVarCommon()->GetLclNum();
srcOffset = src->AsLclVarCommon()->GetLclOffs();
}
else
{
assert(src->OperIs(GT_IND));
GenTree* srcAddr = src->AsIndir()->Addr();
if (!srcAddr->isContained())
{
srcAddrBaseReg = genConsumeReg(srcAddr);
}
else if (srcAddr->OperIsAddrMode())
{
GenTreeAddrMode* addrMode = srcAddr->AsAddrMode();
if (addrMode->HasBase())
{
srcAddrBaseReg = genConsumeReg(addrMode->Base());
}
if (addrMode->HasIndex())
{
srcAddrIndexReg = genConsumeReg(addrMode->Index());
srcAddrIndexScale = addrMode->GetScale();
}
srcOffset = addrMode->Offset();
}
else
{
assert(srcAddr->OperIsLocalAddr());
srcLclNum = srcAddr->AsLclVarCommon()->GetLclNum();
srcOffset = srcAddr->AsLclVarCommon()->GetLclOffs();
}
}
emitter* emit = GetEmitter();
unsigned size = node->GetLayout()->GetSize();
assert(size <= INT32_MAX);
assert(srcOffset < (INT32_MAX - static_cast<int>(size)));
assert(dstOffset < (INT32_MAX - static_cast<int>(size)));
if (size >= XMM_REGSIZE_BYTES)
{
regNumber tempReg = node->GetSingleTempReg(RBM_ALLFLOAT);
instruction simdMov = simdUnalignedMovIns();
// Get the largest SIMD register available if the size is large enough
unsigned regSize = (size >= YMM_REGSIZE_BYTES) && compiler->compOpportunisticallyDependsOn(InstructionSet_AVX)
? YMM_REGSIZE_BYTES
: XMM_REGSIZE_BYTES;
while (size >= regSize)
{
for (; size >= regSize; size -= regSize, srcOffset += regSize, dstOffset += regSize)
{
if (srcLclNum != BAD_VAR_NUM)
{
emit->emitIns_R_S(simdMov, EA_ATTR(regSize), tempReg, srcLclNum, srcOffset);
}
else
{
emit->emitIns_R_ARX(simdMov, EA_ATTR(regSize), tempReg, srcAddrBaseReg, srcAddrIndexReg,
srcAddrIndexScale, srcOffset);
}
if (dstLclNum != BAD_VAR_NUM)
{
emit->emitIns_S_R(simdMov, EA_ATTR(regSize), tempReg, dstLclNum, dstOffset);
}
else
{
emit->emitIns_ARX_R(simdMov, EA_ATTR(regSize), tempReg, dstAddrBaseReg, dstAddrIndexReg,
dstAddrIndexScale, dstOffset);
}
}
// The remaining size is too small for a YMM move; step down to XMM size to finish the SIMD copies.
if (regSize == YMM_REGSIZE_BYTES)
{
regSize = XMM_REGSIZE_BYTES;
}
}
}
// Fill the remainder with normal loads/stores
if (size > 0)
{
regNumber tempReg = node->GetSingleTempReg(RBM_ALLINT);
#ifdef TARGET_AMD64
unsigned regSize = REGSIZE_BYTES;
while (regSize > size)
{
regSize /= 2;
}
for (; size > regSize; size -= regSize, srcOffset += regSize, dstOffset += regSize)
{
if (srcLclNum != BAD_VAR_NUM)
{
emit->emitIns_R_S(INS_mov, EA_ATTR(regSize), tempReg, srcLclNum, srcOffset);
}
else
{
emit->emitIns_R_ARX(INS_mov, EA_ATTR(regSize), tempReg, srcAddrBaseReg, srcAddrIndexReg,
srcAddrIndexScale, srcOffset);
}
if (dstLclNum != BAD_VAR_NUM)
{
emit->emitIns_S_R(INS_mov, EA_ATTR(regSize), tempReg, dstLclNum, dstOffset);
}
else
{
emit->emitIns_ARX_R(INS_mov, EA_ATTR(regSize), tempReg, dstAddrBaseReg, dstAddrIndexReg,
dstAddrIndexScale, dstOffset);
}
}
if (size > 0)
{
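// Copy the final chunk with a register-sized load/store pair shifted back so that it ends exactly
// at the end of the block; any overlapping bytes are simply copied again.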
unsigned shiftBack = regSize - size;
assert(shiftBack <= regSize);
srcOffset -= shiftBack;
dstOffset -= shiftBack;
if (srcLclNum != BAD_VAR_NUM)
{
emit->emitIns_R_S(INS_mov, EA_ATTR(regSize), tempReg, srcLclNum, srcOffset);
}
else
{
emit->emitIns_R_ARX(INS_mov, EA_ATTR(regSize), tempReg, srcAddrBaseReg, srcAddrIndexReg,
srcAddrIndexScale, srcOffset);
}
if (dstLclNum != BAD_VAR_NUM)
{
emit->emitIns_S_R(INS_mov, EA_ATTR(regSize), tempReg, dstLclNum, dstOffset);
}
else
{
emit->emitIns_ARX_R(INS_mov, EA_ATTR(regSize), tempReg, dstAddrBaseReg, dstAddrIndexReg,
dstAddrIndexScale, dstOffset);
}
}
#else // TARGET_X86
for (unsigned regSize = REGSIZE_BYTES; size > 0; size -= regSize, srcOffset += regSize, dstOffset += regSize)
{
while (regSize > size)
{
regSize /= 2;
}
if (srcLclNum != BAD_VAR_NUM)
{
emit->emitIns_R_S(INS_mov, EA_ATTR(regSize), tempReg, srcLclNum, srcOffset);
}
else
{
emit->emitIns_R_ARX(INS_mov, EA_ATTR(regSize), tempReg, srcAddrBaseReg, srcAddrIndexReg,
srcAddrIndexScale, srcOffset);
}
if (dstLclNum != BAD_VAR_NUM)
{
emit->emitIns_S_R(INS_mov, EA_ATTR(regSize), tempReg, dstLclNum, dstOffset);
}
else
{
emit->emitIns_ARX_R(INS_mov, EA_ATTR(regSize), tempReg, dstAddrBaseReg, dstAddrIndexReg,
dstAddrIndexScale, dstOffset);
}
}
#endif
}
}
//----------------------------------------------------------------------------------
// genCodeForCpBlkRepMovs - Generate code for CpBlk by using rep movs
//
// Arguments:
// cpBlkNode - the GT_STORE_[BLK|OBJ|DYN_BLK]
//
// Preconditions:
// The register assignments have been set appropriately.
// This is validated by genConsumeBlockOp().
//
void CodeGen::genCodeForCpBlkRepMovs(GenTreeBlk* cpBlkNode)
{
// Destination address goes in RDI, source address goes in RSI, and size goes in RCX.
// genConsumeBlockOp takes care of this for us.
genConsumeBlockOp(cpBlkNode, REG_RDI, REG_RSI, REG_RCX);
instGen(INS_r_movsb);
}
#ifdef FEATURE_PUT_STRUCT_ARG_STK
//------------------------------------------------------------------------
// CodeGen::genMove8IfNeeded: Conditionally move 8 bytes of a struct to the argument area
//
// Arguments:
// size - The size of bytes remaining to be moved
// longTmpReg - The tmp register to be used for the long value
// srcAddr - The address of the source struct
// offset - The current offset being copied
//
// Return Value:
// Returns the number of bytes moved (8 or 0).
//
// Notes:
// This is used in the PutArgStkKindUnroll case, to move any bytes that are
// not an even multiple of 16.
// On x86, longTmpReg must be an xmm reg; on x64 it must be an integer register.
// This is checked by genStoreRegToStackArg.
//
unsigned CodeGen::genMove8IfNeeded(unsigned size, regNumber longTmpReg, GenTree* srcAddr, unsigned offset)
{
#ifdef TARGET_X86
instruction longMovIns = INS_movq;
#else // !TARGET_X86
instruction longMovIns = INS_mov;
#endif // !TARGET_X86
if ((size & 8) != 0)
{
genCodeForLoadOffset(longMovIns, EA_8BYTE, longTmpReg, srcAddr, offset);
genStoreRegToStackArg(TYP_LONG, longTmpReg, offset);
return 8;
}
return 0;
}
//------------------------------------------------------------------------
// CodeGen::genMove4IfNeeded: Conditionally move 4 bytes of a struct to the argument area
//
// Arguments:
// size - The size of bytes remaining to be moved
// intTmpReg - The tmp register to be used for the int value
// srcAddr - The address of the source struct
// offset - The current offset being copied
//
// Return Value:
// Returns the number of bytes moved (4 or 0).
//
// Notes:
// This is used in the PutArgStkKindUnroll case, to move any bytes that are
// not an even multiple of 16.
// intTmpReg must be an integer register.
// This is checked by genStoreRegToStackArg.
//
unsigned CodeGen::genMove4IfNeeded(unsigned size, regNumber intTmpReg, GenTree* srcAddr, unsigned offset)
{
if ((size & 4) != 0)
{
genCodeForLoadOffset(INS_mov, EA_4BYTE, intTmpReg, srcAddr, offset);
genStoreRegToStackArg(TYP_INT, intTmpReg, offset);
return 4;
}
return 0;
}
//------------------------------------------------------------------------
// CodeGen::genMove2IfNeeded: Conditionally move 2 bytes of a struct to the argument area
//
// Arguments:
// size - The size of bytes remaining to be moved
// intTmpReg - The tmp register to be used for the short value
// srcAddr - The address of the source struct
// offset - The current offset being copied
//
// Return Value:
// Returns the number of bytes moved (2 or 0).
//
// Notes:
// This is used in the PutArgStkKindUnroll case, to move any bytes that are
// not an even multiple of 16.
// intTmpReg must be an integer register.
// This is checked by genStoreRegToStackArg.
//
unsigned CodeGen::genMove2IfNeeded(unsigned size, regNumber intTmpReg, GenTree* srcAddr, unsigned offset)
{
if ((size & 2) != 0)
{
genCodeForLoadOffset(INS_mov, EA_2BYTE, intTmpReg, srcAddr, offset);
genStoreRegToStackArg(TYP_SHORT, intTmpReg, offset);
return 2;
}
return 0;
}
//------------------------------------------------------------------------
// CodeGen::genMove1IfNeeded: Conditionally move 1 byte of a struct to the argument area
//
// Arguments:
// size - The size of bytes remaining to be moved
// intTmpReg - The tmp register to be used for the byte value
// srcAddr - The address of the source struct
// offset - The current offset being copied
//
// Return Value:
// Returns the number of bytes moved (1 or 0).
//
// Notes:
// This is used in the PutArgStkKindUnroll case, to move any bytes that are
// not an even multiple of 16.
// intTmpReg must be an integer register.
// This is checked by genStoreRegToStackArg.
//
unsigned CodeGen::genMove1IfNeeded(unsigned size, regNumber intTmpReg, GenTree* srcAddr, unsigned offset)
{
if ((size & 1) != 0)
{
genCodeForLoadOffset(INS_mov, EA_1BYTE, intTmpReg, srcAddr, offset);
genStoreRegToStackArg(TYP_BYTE, intTmpReg, offset);
return 1;
}
return 0;
}
//---------------------------------------------------------------------------------------------------------------//
// genStructPutArgUnroll: Generates code for passing a struct arg on stack by value using loop unrolling.
//
// Arguments:
// putArgNode - the PutArgStk tree.
//
// Notes:
// m_stkArgVarNum must be set to the base var number, relative to which the by-val struct will be copied to the
// stack.
//
// TODO-Amd64-Unix: Try to share code with copyblk.
// Need refactoring of copyblk before it could be used for putarg_stk.
// The difference for now is that a putarg_stk contains its children, while cpyblk does not.
// This creates differences in code. After some significant refactoring it could be reused.
//
void CodeGen::genStructPutArgUnroll(GenTreePutArgStk* putArgNode)
{
GenTree* src = putArgNode->AsOp()->gtOp1;
// We will never call this method for SIMD types, which are stored directly
// in genPutStructArgStk().
assert(src->isContained() && src->OperIs(GT_OBJ) && src->TypeIs(TYP_STRUCT));
assert(!src->AsObj()->GetLayout()->HasGCPtr());
#ifdef TARGET_X86
assert(!m_pushStkArg);
#endif
unsigned size = putArgNode->GetStackByteSize();
#ifdef TARGET_X86
assert((XMM_REGSIZE_BYTES <= size) && (size <= CPBLK_UNROLL_LIMIT));
#else // !TARGET_X86
assert(size <= CPBLK_UNROLL_LIMIT);
#endif // !TARGET_X86
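// Consume the address of the struct if it lives in a register (a contained local address needs no register).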
if (src->AsOp()->gtOp1->isUsedFromReg())
{
genConsumeReg(src->AsOp()->gtOp1);
}
unsigned offset = 0;
regNumber xmmTmpReg = REG_NA;
regNumber intTmpReg = REG_NA;
regNumber longTmpReg = REG_NA;
if (size >= XMM_REGSIZE_BYTES)
{
xmmTmpReg = putArgNode->GetSingleTempReg(RBM_ALLFLOAT);
}
if ((size % XMM_REGSIZE_BYTES) != 0)
{
intTmpReg = putArgNode->GetSingleTempReg(RBM_ALLINT);
}
#ifdef TARGET_X86
longTmpReg = xmmTmpReg;
#else
longTmpReg = intTmpReg;
#endif
// Let's use SSE2 to be able to do 16 bytes at a time with loads and stores.
size_t slots = size / XMM_REGSIZE_BYTES;
while (slots-- > 0)
{
// TODO: In the below code the load and store instructions are for 16 bytes, but the
// type is EA_8BYTE. The movdqa/u are 16 byte instructions, so it works, but
// this probably needs to be changed.
// Load
genCodeForLoadOffset(INS_movdqu, EA_8BYTE, xmmTmpReg, src->gtGetOp1(), offset);
// Store
genStoreRegToStackArg(TYP_STRUCT, xmmTmpReg, offset);
offset += XMM_REGSIZE_BYTES;
}
// Fill the remainder (15 bytes or less) if there's one.
if ((size % XMM_REGSIZE_BYTES) != 0)
{
offset += genMove8IfNeeded(size, longTmpReg, src->AsOp()->gtOp1, offset);
offset += genMove4IfNeeded(size, intTmpReg, src->AsOp()->gtOp1, offset);
offset += genMove2IfNeeded(size, intTmpReg, src->AsOp()->gtOp1, offset);
offset += genMove1IfNeeded(size, intTmpReg, src->AsOp()->gtOp1, offset);
assert(offset == size);
}
}
//------------------------------------------------------------------------
// genStructPutArgRepMovs: Generates code for passing a struct arg by value on stack using Rep Movs.
//
// Arguments:
// putArgNode - the PutArgStk tree.
//
// Preconditions:
// m_stkArgVarNum must be set to the base var number, relative to which the by-val struct bits will go.
//
void CodeGen::genStructPutArgRepMovs(GenTreePutArgStk* putArgNode)
{
GenTree* src = putArgNode->gtGetOp1();
assert(src->TypeGet() == TYP_STRUCT);
assert(!src->AsObj()->GetLayout()->HasGCPtr());
// Make sure we got the arguments of the cpblk operation in the right registers, and that
// 'src' is contained as expected.
assert(putArgNode->gtRsvdRegs == (RBM_RDI | RBM_RCX | RBM_RSI));
assert(src->isContained());
genConsumePutStructArgStk(putArgNode, REG_RDI, REG_RSI, REG_RCX);
instGen(INS_r_movsb);
}
#ifdef TARGET_X86
//------------------------------------------------------------------------
// genStructPutArgPush: Generates code for passing a struct arg by value on stack using "push".
//
// Arguments:
// putArgNode - the PutArgStk tree.
//
// Notes:
// Used only on x86, in two cases:
// - Structs 4, 8, or 12 bytes in size (less than XMM_REGSIZE_BYTES, multiple of TARGET_POINTER_SIZE).
// - Structs that contain GC pointers - they are guaranteed to be sized correctly by the VM.
//
void CodeGen::genStructPutArgPush(GenTreePutArgStk* putArgNode)
{
// On x86, any struct that contains GC references must be stored to the stack using `push` instructions so
// that the emitter properly detects the need to update the method's GC information.
//
// Strictly speaking, it is only necessary to use "push" to store the GC references themselves, so for structs
// with large numbers of consecutive non-GC-ref-typed fields, we may be able to improve the code size in the
// future.
assert(m_pushStkArg);
GenTree* src = putArgNode->Data();
GenTree* srcAddr = putArgNode->Data()->AsObj()->Addr();
regNumber srcAddrReg = srcAddr->GetRegNum();
const bool srcAddrInReg = srcAddrReg != REG_NA;
unsigned srcLclNum = 0;
unsigned srcLclOffset = 0;
if (srcAddrInReg)
{
srcAddrReg = genConsumeReg(srcAddr);
}
else
{
assert(srcAddr->OperIsLocalAddr());
srcLclNum = srcAddr->AsLclVarCommon()->GetLclNum();
srcLclOffset = srcAddr->AsLclVarCommon()->GetLclOffs();
}
ClassLayout* layout = src->AsObj()->GetLayout();
const unsigned byteSize = putArgNode->GetStackByteSize();
assert((byteSize % TARGET_POINTER_SIZE == 0) && ((byteSize < XMM_REGSIZE_BYTES) || layout->HasGCPtr()));
const unsigned numSlots = byteSize / TARGET_POINTER_SIZE;
assert(putArgNode->gtNumSlots == numSlots);
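// Push the slots in reverse order so that slot 0 is pushed last and ends up at the lowest stack address.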
for (int i = numSlots - 1; i >= 0; --i)
{
emitAttr slotAttr = emitTypeSize(layout->GetGCPtrType(i));
const unsigned byteOffset = i * TARGET_POINTER_SIZE;
if (srcAddrInReg)
{
GetEmitter()->emitIns_AR_R(INS_push, slotAttr, REG_NA, srcAddrReg, byteOffset);
}
else
{
GetEmitter()->emitIns_S(INS_push, slotAttr, srcLclNum, srcLclOffset + byteOffset);
}
AddStackLevel(TARGET_POINTER_SIZE);
}
}
#endif // TARGET_X86
#ifndef TARGET_X86
//------------------------------------------------------------------------
// genStructPutArgPartialRepMovs: Generates code for passing a struct arg by value on stack using
// a mix of pointer-sized stores, "movsq" and "rep movsq".
//
// Arguments:
// putArgNode - the PutArgStk tree.
//
// Notes:
// Used on non-x86 targets (Unix x64) for structs with GC pointers.
//
void CodeGen::genStructPutArgPartialRepMovs(GenTreePutArgStk* putArgNode)
{
// Consume these registers.
// They may now contain gc pointers (depending on their type; gcMarkRegPtrVal will "do the right thing").
genConsumePutStructArgStk(putArgNode, REG_RDI, REG_RSI, REG_NA);
GenTreeObj* src = putArgNode->gtGetOp1()->AsObj();
ClassLayout* layout = src->GetLayout();
const bool srcIsLocal = src->Addr()->OperIsLocalAddr();
const emitAttr srcAddrAttr = srcIsLocal ? EA_PTRSIZE : EA_BYREF;
#if DEBUG
unsigned numGCSlotsCopied = 0;
#endif // DEBUG
assert(layout->HasGCPtr());
const unsigned byteSize = putArgNode->GetStackByteSize();
assert(byteSize % TARGET_POINTER_SIZE == 0);
const unsigned numSlots = byteSize / TARGET_POINTER_SIZE;
assert(putArgNode->gtNumSlots == numSlots);
// No need to disable GC the way COPYOBJ does. Here the refs are copied in atomic operations always.
for (unsigned i = 0; i < numSlots;)
{
if (!layout->IsGCPtr(i))
{
// Let's see if we can use rep movsp (alias for movsd or movsq for 32 and 64 bits respectively)
// instead of a sequence of movsp instructions to save cycles and code size.
unsigned adjacentNonGCSlotCount = 0;
do
{
adjacentNonGCSlotCount++;
i++;
} while ((i < numSlots) && !layout->IsGCPtr(i));
// If we have a very small contiguous non-ref region, it's better just to
// emit a sequence of movsp instructions
if (adjacentNonGCSlotCount < CPOBJ_NONGC_SLOTS_LIMIT)
{
for (; adjacentNonGCSlotCount > 0; adjacentNonGCSlotCount--)
{
instGen(INS_movsp);
}
}
else
{
GetEmitter()->emitIns_R_I(INS_mov, EA_4BYTE, REG_RCX, adjacentNonGCSlotCount);
instGen(INS_r_movsp);
}
}
else
{
// We have a GC (byref or ref) pointer
// TODO-Amd64-Unix: Here a better solution (for code size and CQ) would be to use movsp instruction,
// but the logic for emitting a GC info record is not available (it is internal for the emitter
// only.) See emitGCVarLiveUpd function. If we could call it separately, we could do
// instGen(INS_movsp); and emission of gc info.
var_types memType = layout->GetGCPtrType(i);
GetEmitter()->emitIns_R_AR(ins_Load(memType), emitTypeSize(memType), REG_RCX, REG_RSI, 0);
genStoreRegToStackArg(memType, REG_RCX, i * TARGET_POINTER_SIZE);
#ifdef DEBUG
numGCSlotsCopied++;
#endif // DEBUG
i++;
if (i < numSlots)
{
// Source for the copy operation.
// If a LocalAddr, use EA_PTRSIZE - copy from stack.
// If not a LocalAddr, use EA_BYREF - the source location is not on the stack.
GetEmitter()->emitIns_R_I(INS_add, srcAddrAttr, REG_RSI, TARGET_POINTER_SIZE);
// Always copying to the stack - outgoing arg area
// (or the outgoing arg area of the caller for a tail call) - use EA_PTRSIZE.
GetEmitter()->emitIns_R_I(INS_add, EA_PTRSIZE, REG_RDI, TARGET_POINTER_SIZE);
}
}
}
assert(numGCSlotsCopied == layout->GetGCPtrCount());
}
#endif // !TARGET_X86
//------------------------------------------------------------------------
// If any Vector3 args are on stack and they are not pass-by-ref, the upper 32 bits
// must be cleared to zeroes. The native compiler doesn't clear the upper bits
// and there is no way to know if the caller is native or not. So, the upper
// 32 bits of Vector argument on stack are always cleared to zero.
#if defined(UNIX_AMD64_ABI) && defined(FEATURE_SIMD)
void CodeGen::genClearStackVec3ArgUpperBits()
{
#ifdef DEBUG
if (verbose)
{
printf("*************** In genClearStackVec3ArgUpperBits()\n");
}
#endif
assert(compiler->compGeneratingProlog);
for (unsigned varNum = 0; varNum < compiler->info.compArgsCount; varNum++)
{
const LclVarDsc* varDsc = compiler->lvaGetDesc(varNum);
assert(varDsc->lvIsParam);
// Does the var have a SIMD12 type?
if (varDsc->lvType != TYP_SIMD12)
{
continue;
}
if (!varDsc->lvIsRegArg)
{
// Clear the upper 32 bits by mov dword ptr [V_ARG_BASE+0xC], 0
GetEmitter()->emitIns_S_I(ins_Store(TYP_INT), EA_4BYTE, varNum, genTypeSize(TYP_FLOAT) * 3, 0);
}
else
{
// Assume that for x64 linux, an argument is fully in registers
// or fully on stack.
regNumber argReg = varDsc->GetOtherArgReg();
// Clear the upper 32 bits by two shift instructions.
// argReg = argReg << 96
GetEmitter()->emitIns_R_I(INS_pslldq, emitActualTypeSize(TYP_SIMD12), argReg, 12);
// argReg = argReg >> 96
GetEmitter()->emitIns_R_I(INS_psrldq, emitActualTypeSize(TYP_SIMD12), argReg, 12);
}
}
}
#endif // defined(UNIX_AMD64_ABI) && defined(FEATURE_SIMD)
#endif // FEATURE_PUT_STRUCT_ARG_STK
//
// genCodeForCpObj - Generate code for CpObj nodes to copy structs that have interleaved
// GC pointers.
//
// Arguments:
// cpObjNode - the GT_STORE_OBJ
//
// Notes:
// This will generate a sequence of movsp instructions for the cases of non-gc members,
// and calls to the BY_REF_ASSIGN helper otherwise.
// Note that movsp is an alias for movsd on x86 and movsq on x64.
//
// Preconditions:
// The register assignments have been set appropriately.
// This is validated by genConsumeBlockOp().
//
void CodeGen::genCodeForCpObj(GenTreeObj* cpObjNode)
{
// Make sure we got the arguments of the cpobj operation in the right registers
GenTree* dstAddr = cpObjNode->Addr();
GenTree* source = cpObjNode->Data();
GenTree* srcAddr = nullptr;
var_types srcAddrType = TYP_BYREF;
bool dstOnStack = dstAddr->gtSkipReloadOrCopy()->OperIsLocalAddr();
#ifdef DEBUG
// If the GenTree node has data about GC pointers, this means we're dealing
// with CpObj, so this requires special logic.
assert(cpObjNode->GetLayout()->HasGCPtr());
// MovSp (alias for movsq on x64 and movsd on x86) instruction is used for copying non-gcref fields
// and it needs src = RSI and dst = RDI.
// Either these registers must not contain lclVars, or they must be dying or marked for spill.
// This is because these registers are incremented as we go through the struct.
if (!source->IsLocal())
{
assert(source->gtOper == GT_IND);
srcAddr = source->gtGetOp1();
GenTree* actualSrcAddr = srcAddr->gtSkipReloadOrCopy();
GenTree* actualDstAddr = dstAddr->gtSkipReloadOrCopy();
unsigned srcLclVarNum = BAD_VAR_NUM;
unsigned dstLclVarNum = BAD_VAR_NUM;
bool isSrcAddrLiveOut = false;
bool isDstAddrLiveOut = false;
if (genIsRegCandidateLocal(actualSrcAddr))
{
srcLclVarNum = actualSrcAddr->AsLclVarCommon()->GetLclNum();
isSrcAddrLiveOut = ((actualSrcAddr->gtFlags & (GTF_VAR_DEATH | GTF_SPILL)) == 0);
}
if (genIsRegCandidateLocal(actualDstAddr))
{
dstLclVarNum = actualDstAddr->AsLclVarCommon()->GetLclNum();
isDstAddrLiveOut = ((actualDstAddr->gtFlags & (GTF_VAR_DEATH | GTF_SPILL)) == 0);
}
assert((actualSrcAddr->GetRegNum() != REG_RSI) || !isSrcAddrLiveOut ||
((srcLclVarNum == dstLclVarNum) && !isDstAddrLiveOut));
assert((actualDstAddr->GetRegNum() != REG_RDI) || !isDstAddrLiveOut ||
((srcLclVarNum == dstLclVarNum) && !isSrcAddrLiveOut));
srcAddrType = srcAddr->TypeGet();
}
#endif // DEBUG
// Consume the operands and get them into the right registers.
// They may now contain gc pointers (depending on their type; gcMarkRegPtrVal will "do the right thing").
genConsumeBlockOp(cpObjNode, REG_RDI, REG_RSI, REG_NA);
gcInfo.gcMarkRegPtrVal(REG_RSI, srcAddrType);
gcInfo.gcMarkRegPtrVal(REG_RDI, dstAddr->TypeGet());
unsigned slots = cpObjNode->GetLayout()->GetSlotCount();
// If we can prove it's on the stack we don't need to use the write barrier.
if (dstOnStack)
{
if (slots >= CPOBJ_NONGC_SLOTS_LIMIT)
{
// If the destination of the CpObj is on the stack, make sure we allocated
// RCX to emit the movsp (alias for movsd or movsq for 32 and 64 bits respectively).
assert((cpObjNode->gtRsvdRegs & RBM_RCX) != 0);
GetEmitter()->emitIns_R_I(INS_mov, EA_4BYTE, REG_RCX, slots);
instGen(INS_r_movsp);
}
else
{
// For small structs, it's better to emit a sequence of movsp than to
// emit a rep movsp instruction.
while (slots > 0)
{
instGen(INS_movsp);
slots--;
}
}
}
else
{
ClassLayout* layout = cpObjNode->GetLayout();
unsigned gcPtrCount = layout->GetGCPtrCount();
unsigned i = 0;
while (i < slots)
{
if (!layout->IsGCPtr(i))
{
// Let's see if we can use rep movsp instead of a sequence of movsp instructions
// to save cycles and code size.
unsigned nonGcSlotCount = 0;
do
{
nonGcSlotCount++;
i++;
} while ((i < slots) && !layout->IsGCPtr(i));
// If we have a very small contiguous non-gc region, it's better just to
// emit a sequence of movsp instructions
if (nonGcSlotCount < CPOBJ_NONGC_SLOTS_LIMIT)
{
while (nonGcSlotCount > 0)
{
instGen(INS_movsp);
nonGcSlotCount--;
}
}
else
{
// Otherwise, we can save code-size and improve CQ by emitting
// rep movsp (alias for movsd/movsq for x86/x64)
assert((cpObjNode->gtRsvdRegs & RBM_RCX) != 0);
GetEmitter()->emitIns_R_I(INS_mov, EA_4BYTE, REG_RCX, nonGcSlotCount);
instGen(INS_r_movsp);
}
}
else
{
genEmitHelperCall(CORINFO_HELP_ASSIGN_BYREF, 0, EA_PTRSIZE);
gcPtrCount--;
i++;
}
}
assert(gcPtrCount == 0);
}
// Clear the gcInfo for RSI and RDI.
// While we normally update GC info prior to the last instruction that uses them,
// these actually live into the helper call.
gcInfo.gcMarkRegSetNpt(RBM_RSI);
gcInfo.gcMarkRegSetNpt(RBM_RDI);
}
#ifdef TARGET_AMD64
//----------------------------------------------------------------------------------
// genCodeForCpBlkHelper - Generate code for a CpBlk node by means of the VM memcpy helper call
//
// Arguments:
// cpBlkNode - the GT_STORE_[BLK|OBJ|DYN_BLK]
//
// Preconditions:
// The register assignments have been set appropriately.
// This is validated by genConsumeBlockOp().
//
void CodeGen::genCodeForCpBlkHelper(GenTreeBlk* cpBlkNode)
{
// Destination address goes in arg0, source address goes in arg1, and size goes in arg2.
// genConsumeBlockOp takes care of this for us.
genConsumeBlockOp(cpBlkNode, REG_ARG_0, REG_ARG_1, REG_ARG_2);
genEmitHelperCall(CORINFO_HELP_MEMCPY, 0, EA_UNKNOWN);
}
#endif // TARGET_AMD64
// generate code to do a switch statement based on a table of ip-relative offsets
void CodeGen::genTableBasedSwitch(GenTree* treeNode)
{
genConsumeOperands(treeNode->AsOp());
regNumber idxReg = treeNode->AsOp()->gtOp1->GetRegNum();
regNumber baseReg = treeNode->AsOp()->gtOp2->GetRegNum();
regNumber tmpReg = treeNode->GetSingleTempReg();
// load the ip-relative offset (which is relative to start of fgFirstBB)
GetEmitter()->emitIns_R_ARX(INS_mov, EA_4BYTE, baseReg, baseReg, idxReg, 4, 0);
// add it to the absolute address of fgFirstBB
GetEmitter()->emitIns_R_L(INS_lea, EA_PTR_DSP_RELOC, compiler->fgFirstBB, tmpReg);
GetEmitter()->emitIns_R_R(INS_add, EA_PTRSIZE, baseReg, tmpReg);
// jmp baseReg
GetEmitter()->emitIns_R(INS_i_jmp, emitTypeSize(TYP_I_IMPL), baseReg);
}
// emits the table and an instruction to get the address of the first element
void CodeGen::genJumpTable(GenTree* treeNode)
{
noway_assert(compiler->compCurBB->bbJumpKind == BBJ_SWITCH);
assert(treeNode->OperGet() == GT_JMPTABLE);
unsigned jumpCount = compiler->compCurBB->bbJumpSwt->bbsCount;
BasicBlock** jumpTable = compiler->compCurBB->bbJumpSwt->bbsDstTab;
unsigned jmpTabOffs;
unsigned jmpTabBase;
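// Allocate a data section for the jump table; each entry is emitted as an offset relative to the start
// of the method (fgFirstBB), which genTableBasedSwitch then adds to the address of fgFirstBB.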
jmpTabBase = GetEmitter()->emitBBTableDataGenBeg(jumpCount, true);
jmpTabOffs = 0;
JITDUMP("\n J_M%03u_DS%02u LABEL DWORD\n", compiler->compMethodID, jmpTabBase);
for (unsigned i = 0; i < jumpCount; i++)
{
BasicBlock* target = *jumpTable++;
noway_assert(target->bbFlags & BBF_HAS_LABEL);
JITDUMP(" DD L_M%03u_" FMT_BB "\n", compiler->compMethodID, target->bbNum);
GetEmitter()->emitDataGenData(i, target);
}
GetEmitter()->emitDataGenEnd();
// Access to inline data is 'abstracted' by a special type of static member
// (produced by eeFindJitDataOffs) which the emitter recognizes as being a reference
// to constant data, not a real static field.
GetEmitter()->emitIns_R_C(INS_lea, emitTypeSize(TYP_I_IMPL), treeNode->GetRegNum(),
compiler->eeFindJitDataOffs(jmpTabBase), 0);
genProduceReg(treeNode);
}
//------------------------------------------------------------------------
// genCodeForLockAdd: Generate code for a GT_LOCKADD node
//
// Arguments:
// node - the GT_LOCKADD node
//
void CodeGen::genCodeForLockAdd(GenTreeOp* node)
{
assert(node->OperIs(GT_LOCKADD));
GenTree* addr = node->gtGetOp1();
GenTree* data = node->gtGetOp2();
emitAttr size = emitActualTypeSize(data->TypeGet());
assert(addr->isUsedFromReg());
assert(data->isUsedFromReg() || data->isContainedIntOrIImmed());
assert((size == EA_4BYTE) || (size == EA_PTRSIZE));
genConsumeOperands(node);
instGen(INS_lock);
if (data->isContainedIntOrIImmed())
{
int imm = static_cast<int>(data->AsIntCon()->IconValue());
assert(imm == data->AsIntCon()->IconValue());
GetEmitter()->emitIns_I_AR(INS_add, size, imm, addr->GetRegNum(), 0);
}
else
{
GetEmitter()->emitIns_AR_R(INS_add, size, data->GetRegNum(), addr->GetRegNum(), 0);
}
}
//------------------------------------------------------------------------
// genLockedInstructions: Generate code for a GT_XADD or GT_XCHG node.
//
// Arguments:
// node - the GT_XADD/XCHG node
//
void CodeGen::genLockedInstructions(GenTreeOp* node)
{
assert(node->OperIs(GT_XADD, GT_XCHG));
GenTree* addr = node->gtGetOp1();
GenTree* data = node->gtGetOp2();
emitAttr size = emitTypeSize(node->TypeGet());
assert(addr->isUsedFromReg());
assert(data->isUsedFromReg());
assert((size == EA_4BYTE) || (size == EA_PTRSIZE));
genConsumeOperands(node);
// If the destination register is different from the data register then we need
// to first move the data to the target register. Make sure we don't overwrite
// the address, the register allocator should have taken care of this.
assert((node->GetRegNum() != addr->GetRegNum()) || (node->GetRegNum() == data->GetRegNum()));
GetEmitter()->emitIns_Mov(INS_mov, size, node->GetRegNum(), data->GetRegNum(), /* canSkip */ true);
instruction ins = node->OperIs(GT_XADD) ? INS_xadd : INS_xchg;
// XCHG has an implied lock prefix when the first operand is a memory operand.
if (ins != INS_xchg)
{
instGen(INS_lock);
}
GetEmitter()->emitIns_AR_R(ins, size, node->GetRegNum(), addr->GetRegNum(), 0);
genProduceReg(node);
}
//------------------------------------------------------------------------
// genCodeForCmpXchg: Produce code for a GT_CMPXCHG node.
//
// Arguments:
// tree - the GT_CMPXCHG node
//
void CodeGen::genCodeForCmpXchg(GenTreeCmpXchg* tree)
{
assert(tree->OperIs(GT_CMPXCHG));
var_types targetType = tree->TypeGet();
regNumber targetReg = tree->GetRegNum();
GenTree* location = tree->gtOpLocation; // arg1
GenTree* value = tree->gtOpValue; // arg2
GenTree* comparand = tree->gtOpComparand; // arg3
assert(location->GetRegNum() != REG_NA && location->GetRegNum() != REG_RAX);
assert(value->GetRegNum() != REG_NA && value->GetRegNum() != REG_RAX);
genConsumeReg(location);
genConsumeReg(value);
genConsumeReg(comparand);
// comparand goes to RAX;
// Note that we must issue this move after the genConsumeRegs(), in case any of the above
// have a GT_COPY from RAX.
inst_Mov(comparand->TypeGet(), REG_RAX, comparand->GetRegNum(), /* canSkip */ true);
// location is Rm
instGen(INS_lock);
GetEmitter()->emitIns_AR_R(INS_cmpxchg, emitTypeSize(targetType), value->GetRegNum(), location->GetRegNum(), 0);
// Result is in RAX
inst_Mov(targetType, targetReg, REG_RAX, /* canSkip */ true);
genProduceReg(tree);
}
// generate code for BoundsCheck nodes
void CodeGen::genRangeCheck(GenTree* oper)
{
noway_assert(oper->OperIs(GT_BOUNDS_CHECK));
GenTreeBoundsChk* bndsChk = oper->AsBoundsChk();
GenTree* arrIndex = bndsChk->GetIndex();
GenTree* arrLen = bndsChk->GetArrayLength();
GenTree * src1, *src2;
emitJumpKind jmpKind;
instruction cmpKind;
genConsumeRegs(arrIndex);
genConsumeRegs(arrLen);
if (arrIndex->IsIntegralConst(0) && arrLen->isUsedFromReg())
{
// arrIndex is 0 and arrLen is in a reg. In this case
// we can generate
// test reg, reg
// since arrLen is non-negative
src1 = arrLen;
src2 = arrLen;
jmpKind = EJ_je;
cmpKind = INS_test;
}
else if (arrIndex->isContainedIntOrIImmed())
{
// arrIndex is a contained constant. In this case
// we will generate one of the following
// cmp [mem], immed (if arrLen is a memory op)
// cmp reg, immed (if arrLen is in a reg)
//
// That is, arrLen cannot be a contained immediate.
assert(!arrLen->isContainedIntOrIImmed());
src1 = arrLen;
src2 = arrIndex;
jmpKind = EJ_jbe;
cmpKind = INS_cmp;
}
else
{
// arrIndex could either be a contained memory op or a reg
// In this case we will generate one of the following
// cmp [mem], immed (if arrLen is a constant)
// cmp [mem], reg (if arrLen is in a reg)
// cmp reg, immed (if arrIndex is in a reg)
// cmp reg1, reg2 (if arrIndex is in reg1)
// cmp reg, [mem] (if arrLen is a memory op)
//
// That is only one of arrIndex or arrLen can be a memory op.
assert(!arrIndex->isUsedFromMemory() || !arrLen->isUsedFromMemory());
src1 = arrIndex;
src2 = arrLen;
jmpKind = EJ_jae;
cmpKind = INS_cmp;
}
var_types bndsChkType = src2->TypeGet();
#if DEBUG
// Bounds checks can only be 32 or 64 bit sized comparisons.
assert(bndsChkType == TYP_INT || bndsChkType == TYP_LONG);
// The type of the bounds check should always be wide enough to compare against the index.
assert(emitTypeSize(bndsChkType) >= emitTypeSize(src1->TypeGet()));
#endif // DEBUG
GetEmitter()->emitInsBinary(cmpKind, emitTypeSize(bndsChkType), src1, src2);
genJumpToThrowHlpBlk(jmpKind, bndsChk->gtThrowKind, bndsChk->gtIndRngFailBB);
}
//---------------------------------------------------------------------
// genCodeForPhysReg - generate code for a GT_PHYSREG node
//
// Arguments
// tree - the GT_PHYSREG node
//
// Return value:
// None
//
void CodeGen::genCodeForPhysReg(GenTreePhysReg* tree)
{
assert(tree->OperIs(GT_PHYSREG));
var_types targetType = tree->TypeGet();
regNumber targetReg = tree->GetRegNum();
inst_Mov(targetType, targetReg, tree->gtSrcReg, /* canSkip */ true);
genTransferRegGCState(targetReg, tree->gtSrcReg);
genProduceReg(tree);
}
//---------------------------------------------------------------------
// genCodeForNullCheck - generate code for a GT_NULLCHECK node
//
// Arguments
// tree - the GT_NULLCHECK node
//
// Return value:
// None
//
void CodeGen::genCodeForNullCheck(GenTreeIndir* tree)
{
assert(tree->OperIs(GT_NULLCHECK));
assert(tree->gtOp1->isUsedFromReg());
regNumber reg = genConsumeReg(tree->gtOp1);
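// cmp dereferences [reg], raising a null reference exception if the address is null, without writing any register.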
GetEmitter()->emitIns_AR_R(INS_cmp, emitTypeSize(tree), reg, reg, 0);
}
//------------------------------------------------------------------------
// genCodeForArrIndex: Generates code to bounds check the index for one dimension of an array reference,
// producing the effective index by subtracting the lower bound.
//
// Arguments:
// arrIndex - the node for which we're generating code
//
// Return Value:
// None.
//
void CodeGen::genCodeForArrIndex(GenTreeArrIndex* arrIndex)
{
GenTree* arrObj = arrIndex->ArrObj();
GenTree* indexNode = arrIndex->IndexExpr();
regNumber arrReg = genConsumeReg(arrObj);
regNumber indexReg = genConsumeReg(indexNode);
regNumber tgtReg = arrIndex->GetRegNum();
unsigned dim = arrIndex->gtCurrDim;
unsigned rank = arrIndex->gtArrRank;
var_types elemType = arrIndex->gtArrElemType;
noway_assert(tgtReg != REG_NA);
// Subtract the lower bound for this dimension.
// TODO-XArch-CQ: make this contained if it's an immediate that fits.
inst_Mov(indexNode->TypeGet(), tgtReg, indexReg, /* canSkip */ true);
GetEmitter()->emitIns_R_AR(INS_sub, emitActualTypeSize(TYP_INT), tgtReg, arrReg,
compiler->eeGetMDArrayLowerBoundOffset(rank, dim));
GetEmitter()->emitIns_R_AR(INS_cmp, emitActualTypeSize(TYP_INT), tgtReg, arrReg,
compiler->eeGetMDArrayLengthOffset(rank, dim));
genJumpToThrowHlpBlk(EJ_jae, SCK_RNGCHK_FAIL);
genProduceReg(arrIndex);
}
//------------------------------------------------------------------------
// genCodeForArrOffset: Generates code to compute the flattened array offset for
// one dimension of an array reference:
// result = (prevDimOffset * dimSize) + effectiveIndex
// where dimSize is obtained from the arrObj operand
//
// Arguments:
// arrOffset - the node for which we're generating code
//
// Return Value:
// None.
//
// Notes:
// dimSize and effectiveIndex are always non-negative, the former by design,
// and the latter because it has been normalized to be zero-based.
void CodeGen::genCodeForArrOffset(GenTreeArrOffs* arrOffset)
{
GenTree* offsetNode = arrOffset->gtOffset;
GenTree* indexNode = arrOffset->gtIndex;
GenTree* arrObj = arrOffset->gtArrObj;
regNumber tgtReg = arrOffset->GetRegNum();
assert(tgtReg != REG_NA);
unsigned dim = arrOffset->gtCurrDim;
unsigned rank = arrOffset->gtArrRank;
var_types elemType = arrOffset->gtArrElemType;
// First, consume the operands in the correct order.
regNumber offsetReg = REG_NA;
regNumber tmpReg = REG_NA;
if (!offsetNode->IsIntegralConst(0))
{
offsetReg = genConsumeReg(offsetNode);
// We will use a temp register for the offset*scale+effectiveIndex computation.
tmpReg = arrOffset->GetSingleTempReg();
}
else
{
assert(offsetNode->isContained());
}
regNumber indexReg = genConsumeReg(indexNode);
// Although arrReg may not be used in the constant-index case, if we have generated
// the value into a register, we must consume it, otherwise we will fail to end the
// live range of the gc ptr.
// TODO-CQ: Currently arrObj will always have a register allocated to it.
// We could avoid allocating a register for it, which would be of value if the arrObj
// is an on-stack lclVar.
regNumber arrReg = REG_NA;
if (arrObj->gtHasReg(compiler))
{
arrReg = genConsumeReg(arrObj);
}
if (!offsetNode->IsIntegralConst(0))
{
assert(tmpReg != REG_NA);
assert(arrReg != REG_NA);
// Evaluate tgtReg = offsetReg*dim_size + indexReg.
// tmpReg is used to load dim_size and the result of the multiplication.
// Note that dim_size will never be negative.
GetEmitter()->emitIns_R_AR(INS_mov, emitActualTypeSize(TYP_INT), tmpReg, arrReg,
compiler->eeGetMDArrayLengthOffset(rank, dim));
inst_RV_RV(INS_imul, tmpReg, offsetReg);
if (tmpReg == tgtReg)
{
inst_RV_RV(INS_add, tmpReg, indexReg);
}
else
{
inst_Mov(TYP_I_IMPL, tgtReg, indexReg, /* canSkip */ true);
inst_RV_RV(INS_add, tgtReg, tmpReg);
}
}
else
{
inst_Mov(TYP_INT, tgtReg, indexReg, /* canSkip */ true);
}
genProduceReg(arrOffset);
}
instruction CodeGen::genGetInsForOper(genTreeOps oper, var_types type)
{
instruction ins;
// Operations on SIMD vectors shouldn't come down this path
assert(!varTypeIsSIMD(type));
if (varTypeIsFloating(type))
{
return ins_MathOp(oper, type);
}
switch (oper)
{
case GT_ADD:
ins = INS_add;
break;
case GT_AND:
ins = INS_and;
break;
case GT_LSH:
ins = INS_shl;
break;
case GT_MUL:
ins = INS_imul;
break;
case GT_NEG:
ins = INS_neg;
break;
case GT_NOT:
ins = INS_not;
break;
case GT_OR:
ins = INS_or;
break;
case GT_ROL:
ins = INS_rol;
break;
case GT_ROR:
ins = INS_ror;
break;
case GT_RSH:
ins = INS_sar;
break;
case GT_RSZ:
ins = INS_shr;
break;
case GT_SUB:
ins = INS_sub;
break;
case GT_XOR:
ins = INS_xor;
break;
#if !defined(TARGET_64BIT)
case GT_ADD_LO:
ins = INS_add;
break;
case GT_ADD_HI:
ins = INS_adc;
break;
case GT_SUB_LO:
ins = INS_sub;
break;
case GT_SUB_HI:
ins = INS_sbb;
break;
case GT_LSH_HI:
ins = INS_shld;
break;
case GT_RSH_LO:
ins = INS_shrd;
break;
#endif // !defined(TARGET_64BIT)
default:
unreached();
break;
}
return ins;
}
//------------------------------------------------------------------------
// genCodeForShift: Generates the code sequence for a GenTree node that
// represents a bit shift or rotate operation (<<, >>, >>>, rol, ror).
//
// Arguments:
// tree - the bit shift node (that specifies the type of bit shift to perform).
//
// Assumptions:
// a) All GenTrees are register allocated.
// b) The shift-by-amount in tree->AsOp()->gtOp2 is either a contained constant or
// it's a register-allocated expression. If it is in a register that is
// not RCX, it will be moved to RCX (so RCX better not be in use!).
//
void CodeGen::genCodeForShift(GenTree* tree)
{
// Only the non-RMW case here.
assert(tree->OperIsShiftOrRotate());
assert(tree->AsOp()->gtOp1->isUsedFromReg());
assert(tree->GetRegNum() != REG_NA);
genConsumeOperands(tree->AsOp());
var_types targetType = tree->TypeGet();
instruction ins = genGetInsForOper(tree->OperGet(), targetType);
GenTree* operand = tree->gtGetOp1();
regNumber operandReg = operand->GetRegNum();
GenTree* shiftBy = tree->gtGetOp2();
if (shiftBy->isContainedIntOrIImmed())
{
emitAttr size = emitTypeSize(tree);
// Optimize "X<<1" to "lea [reg+reg]" or "add reg, reg"
if (tree->OperIs(GT_LSH) && !tree->gtOverflowEx() && !tree->gtSetFlags() && shiftBy->IsIntegralConst(1))
{
if (tree->GetRegNum() == operandReg)
{
GetEmitter()->emitIns_R_R(INS_add, size, tree->GetRegNum(), operandReg);
}
else
{
GetEmitter()->emitIns_R_ARX(INS_lea, size, tree->GetRegNum(), operandReg, operandReg, 1, 0);
}
}
else
{
int shiftByValue = (int)shiftBy->AsIntConCommon()->IconValue();
#if defined(TARGET_64BIT)
// Try to emit rorx instead of mov+rol if BMI2 is available;
// this makes sense only for 64-bit integers.
if ((genActualType(targetType) == TYP_LONG) && (tree->GetRegNum() != operandReg) &&
compiler->compOpportunisticallyDependsOn(InstructionSet_BMI2) && tree->OperIs(GT_ROL, GT_ROR) &&
(shiftByValue > 0) && (shiftByValue < 64))
{
const int value = tree->OperIs(GT_ROL) ? (64 - shiftByValue) : shiftByValue;
GetEmitter()->emitIns_R_R_I(INS_rorx, size, tree->GetRegNum(), operandReg, value);
genProduceReg(tree);
return;
}
#endif
// First, move the operand to the destination register and
// later on perform the shift in-place.
// (LSRA will try to avoid this situation through preferencing.)
inst_Mov(targetType, tree->GetRegNum(), operandReg, /* canSkip */ true);
inst_RV_SH(ins, size, tree->GetRegNum(), shiftByValue);
}
}
else
{
// We must have the number of bits to shift stored in ECX, since we constrained this node to
// sit in ECX. In case this didn't happen, LSRA expects the code generator to move it since it's a single
// register destination requirement.
genCopyRegIfNeeded(shiftBy, REG_RCX);
// The operand to be shifted must not be in ECX
noway_assert(operandReg != REG_RCX);
inst_Mov(targetType, tree->GetRegNum(), operandReg, /* canSkip */ true);
inst_RV(ins, tree->GetRegNum(), targetType);
}
genProduceReg(tree);
}
#ifdef TARGET_X86
//------------------------------------------------------------------------
// genCodeForShiftLong: Generates the code sequence for a GenTree node that
// represents a three operand bit shift or rotate operation (<<Hi, >>Lo).
//
// Arguments:
// tree - the bit shift node (that specifies the type of bit shift to perform).
//
// Assumptions:
// a) All GenTrees are register allocated.
// b) The shift-by-amount in tree->AsOp()->gtOp2 is a contained constant
//
// TODO-X86-CQ: This only handles the case where the operand being shifted is in a register. We don't
// need sourceHi to be always in reg in case of GT_LSH_HI (because it could be moved from memory to
// targetReg if sourceHi is a memory operand). Similarly for GT_RSH_LO, sourceLo could be marked as
// contained memory-op. Even if not a memory-op, we could mark it as reg-optional.
//
void CodeGen::genCodeForShiftLong(GenTree* tree)
{
// Only the non-RMW case here.
genTreeOps oper = tree->OperGet();
assert(oper == GT_LSH_HI || oper == GT_RSH_LO);
GenTree* operand = tree->AsOp()->gtOp1;
assert(operand->OperGet() == GT_LONG);
assert(operand->AsOp()->gtOp1->isUsedFromReg());
assert(operand->AsOp()->gtOp2->isUsedFromReg());
GenTree* operandLo = operand->gtGetOp1();
GenTree* operandHi = operand->gtGetOp2();
regNumber regLo = operandLo->GetRegNum();
regNumber regHi = operandHi->GetRegNum();
genConsumeOperands(tree->AsOp());
var_types targetType = tree->TypeGet();
instruction ins = genGetInsForOper(oper, targetType);
GenTree* shiftBy = tree->gtGetOp2();
assert(shiftBy->isContainedIntOrIImmed());
unsigned int count = (unsigned int)shiftBy->AsIntConCommon()->IconValue();
regNumber regResult = (oper == GT_LSH_HI) ? regHi : regLo;
inst_Mov(targetType, tree->GetRegNum(), regResult, /* canSkip */ true);
if (oper == GT_LSH_HI)
{
inst_RV_RV_IV(ins, emitTypeSize(targetType), tree->GetRegNum(), regLo, count);
}
else
{
assert(oper == GT_RSH_LO);
inst_RV_RV_IV(ins, emitTypeSize(targetType), tree->GetRegNum(), regHi, count);
}
genProduceReg(tree);
}
#endif
//------------------------------------------------------------------------
// genMapShiftInsToShiftByConstantIns: Given a general shift/rotate instruction,
// map it to the specific x86/x64 shift opcode for a shift/rotate by a constant.
// X86/x64 has a special encoding for shift/rotate-by-constant-1.
//
// Arguments:
// ins: the base shift/rotate instruction
// shiftByValue: the constant value by which we are shifting/rotating
//
instruction CodeGen::genMapShiftInsToShiftByConstantIns(instruction ins, int shiftByValue)
{
assert(ins == INS_rcl || ins == INS_rcr || ins == INS_rol || ins == INS_ror || ins == INS_shl || ins == INS_shr ||
ins == INS_sar);
// Which format should we use?
instruction shiftByConstantIns;
if (shiftByValue == 1)
{
// Use the shift-by-one format.
assert(INS_rcl + 1 == INS_rcl_1);
assert(INS_rcr + 1 == INS_rcr_1);
assert(INS_rol + 1 == INS_rol_1);
assert(INS_ror + 1 == INS_ror_1);
assert(INS_shl + 1 == INS_shl_1);
assert(INS_shr + 1 == INS_shr_1);
assert(INS_sar + 1 == INS_sar_1);
shiftByConstantIns = (instruction)(ins + 1);
}
else
{
// Use the shift-by-NNN format.
assert(INS_rcl + 2 == INS_rcl_N);
assert(INS_rcr + 2 == INS_rcr_N);
assert(INS_rol + 2 == INS_rol_N);
assert(INS_ror + 2 == INS_ror_N);
assert(INS_shl + 2 == INS_shl_N);
assert(INS_shr + 2 == INS_shr_N);
assert(INS_sar + 2 == INS_sar_N);
shiftByConstantIns = (instruction)(ins + 2);
}
return shiftByConstantIns;
}
//------------------------------------------------------------------------
// genCodeForShiftRMW: Generates the code sequence for a GT_STOREIND GenTree node that
// represents a RMW bit shift or rotate operation (<<, >>, >>>, rol, ror), for example:
// GT_STOREIND( AddressTree, GT_SHL( Ind ( AddressTree ), Operand ) )
//
// Arguments:
// storeIndNode: the GT_STOREIND node.
//
void CodeGen::genCodeForShiftRMW(GenTreeStoreInd* storeInd)
{
GenTree* data = storeInd->Data();
assert(data->OperIsShift() || data->OperIsRotate());
// This function only handles the RMW case.
assert(data->AsOp()->gtOp1->isUsedFromMemory());
assert(data->AsOp()->gtOp1->isIndir());
assert(Lowering::IndirsAreEquivalent(data->AsOp()->gtOp1, storeInd));
assert(data->GetRegNum() == REG_NA);
var_types targetType = data->TypeGet();
genTreeOps oper = data->OperGet();
instruction ins = genGetInsForOper(oper, targetType);
emitAttr attr = EA_ATTR(genTypeSize(targetType));
GenTree* shiftBy = data->AsOp()->gtOp2;
if (shiftBy->isContainedIntOrIImmed())
{
int shiftByValue = (int)shiftBy->AsIntConCommon()->IconValue();
ins = genMapShiftInsToShiftByConstantIns(ins, shiftByValue);
if (shiftByValue == 1)
{
// There is no source in this case, as the shift by count is embedded in the instruction opcode itself.
GetEmitter()->emitInsRMW(ins, attr, storeInd);
}
else
{
GetEmitter()->emitInsRMW(ins, attr, storeInd, shiftBy);
}
}
else
{
// We must have the number of bits to shift stored in ECX, since we constrained this node to
// sit in ECX. In case this didn't happen, LSRA expects the code generator to move it since it's a single
// register destination requirement.
genCopyRegIfNeeded(shiftBy, REG_RCX);
// The shiftBy operand is implicit, so call the unary version of emitInsRMW.
GetEmitter()->emitInsRMW(ins, attr, storeInd);
}
}
//------------------------------------------------------------------------
// genCodeForLclAddr: Generates the code for GT_LCL_FLD_ADDR/GT_LCL_VAR_ADDR.
//
// Arguments:
// tree - the node.
//
void CodeGen::genCodeForLclAddr(GenTree* tree)
{
assert(tree->OperIs(GT_LCL_FLD_ADDR, GT_LCL_VAR_ADDR));
var_types targetType = tree->TypeGet();
regNumber targetReg = tree->GetRegNum();
// Address of a local var.
noway_assert((targetType == TYP_BYREF) || (targetType == TYP_I_IMPL));
emitAttr size = emitTypeSize(targetType);
inst_RV_TT(INS_lea, targetReg, tree, 0, size);
genProduceReg(tree);
}
//------------------------------------------------------------------------
// genCodeForLclFld: Produce code for a GT_LCL_FLD node.
//
// Arguments:
// tree - the GT_LCL_FLD node
//
void CodeGen::genCodeForLclFld(GenTreeLclFld* tree)
{
assert(tree->OperIs(GT_LCL_FLD));
var_types targetType = tree->TypeGet();
regNumber targetReg = tree->GetRegNum();
noway_assert(targetReg != REG_NA);
#ifdef FEATURE_SIMD
// Loading of TYP_SIMD12 (i.e. Vector3) field
if (targetType == TYP_SIMD12)
{
genLoadLclTypeSIMD12(tree);
return;
}
#endif
noway_assert(targetType != TYP_STRUCT);
emitAttr size = emitTypeSize(targetType);
unsigned offs = tree->GetLclOffs();
unsigned varNum = tree->GetLclNum();
assert(varNum < compiler->lvaCount);
GetEmitter()->emitIns_R_S(ins_Load(targetType), size, targetReg, varNum, offs);
genProduceReg(tree);
}
//------------------------------------------------------------------------
// genCodeForLclVar: Produce code for a GT_LCL_VAR node.
//
// Arguments:
// tree - the GT_LCL_VAR node
//
void CodeGen::genCodeForLclVar(GenTreeLclVar* tree)
{
assert(tree->OperIs(GT_LCL_VAR));
// lcl_vars are not defs
assert((tree->gtFlags & GTF_VAR_DEF) == 0);
LclVarDsc* varDsc = compiler->lvaGetDesc(tree);
bool isRegCandidate = varDsc->lvIsRegCandidate();
// If this is a register candidate that has been spilled, genConsumeReg() will
// reload it at the point of use. Otherwise, if it's not in a register, we load it here.
if (!isRegCandidate && !tree->IsMultiReg() && !(tree->gtFlags & GTF_SPILLED))
{
#if defined(FEATURE_SIMD) && defined(TARGET_X86)
// Loading of TYP_SIMD12 (i.e. Vector3) variable
if (tree->TypeGet() == TYP_SIMD12)
{
genLoadLclTypeSIMD12(tree);
return;
}
#endif // defined(FEATURE_SIMD) && defined(TARGET_X86)
var_types type = varDsc->GetRegisterType(tree);
GetEmitter()->emitIns_R_S(ins_Load(type, compiler->isSIMDTypeLocalAligned(tree->GetLclNum())),
emitTypeSize(type), tree->GetRegNum(), tree->GetLclNum(), 0);
genProduceReg(tree);
}
}
//------------------------------------------------------------------------
// genCodeForStoreLclFld: Produce code for a GT_STORE_LCL_FLD node.
//
// Arguments:
// tree - the GT_STORE_LCL_FLD node
//
void CodeGen::genCodeForStoreLclFld(GenTreeLclFld* tree)
{
assert(tree->OperIs(GT_STORE_LCL_FLD));
var_types targetType = tree->TypeGet();
GenTree* op1 = tree->gtGetOp1();
noway_assert(targetType != TYP_STRUCT);
#ifdef FEATURE_SIMD
// storing of TYP_SIMD12 (i.e. Vector3) field
if (targetType == TYP_SIMD12)
{
genStoreLclTypeSIMD12(tree);
return;
}
#endif // FEATURE_SIMD
assert(varTypeUsesFloatReg(targetType) == varTypeUsesFloatReg(op1));
assert(genTypeSize(genActualType(targetType)) == genTypeSize(genActualType(op1->TypeGet())));
genConsumeRegs(op1);
if (op1->OperIs(GT_BITCAST) && op1->isContained())
{
regNumber targetReg = tree->GetRegNum();
GenTree* bitCastSrc = op1->gtGetOp1();
var_types srcType = bitCastSrc->TypeGet();
noway_assert(!bitCastSrc->isContained());
if (targetReg == REG_NA)
{
unsigned lclNum = tree->GetLclNum();
LclVarDsc* varDsc = compiler->lvaGetDesc(lclNum);
GetEmitter()->emitIns_S_R(ins_Store(srcType, compiler->isSIMDTypeLocalAligned(lclNum)),
emitTypeSize(targetType), bitCastSrc->GetRegNum(), lclNum, tree->GetLclOffs());
varDsc->SetRegNum(REG_STK);
}
else
{
genBitCast(targetType, targetReg, srcType, bitCastSrc->GetRegNum());
}
}
else
{
GetEmitter()->emitInsBinary(ins_Store(targetType), emitTypeSize(tree), tree, op1);
}
// Update variable liveness after the instruction was emitted.
genUpdateLife(tree);
}
//------------------------------------------------------------------------
// genCodeForStoreLclVar: Produce code for a GT_STORE_LCL_VAR node.
//
// Arguments:
// lclNode - the GT_STORE_LCL_VAR node
//
void CodeGen::genCodeForStoreLclVar(GenTreeLclVar* lclNode)
{
assert(lclNode->OperIs(GT_STORE_LCL_VAR));
regNumber targetReg = lclNode->GetRegNum();
emitter* emit = GetEmitter();
GenTree* op1 = lclNode->gtGetOp1();
// Stores from a multi-reg source are handled separately.
if (op1->gtSkipReloadOrCopy()->IsMultiRegNode())
{
genMultiRegStoreToLocal(lclNode);
}
else
{
unsigned lclNum = lclNode->GetLclNum();
LclVarDsc* varDsc = compiler->lvaGetDesc(lclNum);
var_types targetType = varDsc->GetRegisterType(lclNode);
#ifdef DEBUG
var_types op1Type = op1->TypeGet();
if (op1Type == TYP_STRUCT)
{
assert(op1->IsLocal());
GenTreeLclVar* op1LclVar = op1->AsLclVar();
unsigned op1lclNum = op1LclVar->GetLclNum();
LclVarDsc* op1VarDsc = compiler->lvaGetDesc(op1lclNum);
op1Type = op1VarDsc->GetRegisterType(op1LclVar);
}
assert(varTypeUsesFloatReg(targetType) == varTypeUsesFloatReg(op1Type));
assert(!varTypeUsesFloatReg(targetType) || (emitTypeSize(targetType) == emitTypeSize(op1Type)));
#endif
#if !defined(TARGET_64BIT)
if (targetType == TYP_LONG)
{
genStoreLongLclVar(lclNode);
return;
}
#endif // !defined(TARGET_64BIT)
#ifdef FEATURE_SIMD
// storing of TYP_SIMD12 (i.e. Vector3) field
if (targetType == TYP_SIMD12)
{
genStoreLclTypeSIMD12(lclNode);
return;
}
#endif // FEATURE_SIMD
genConsumeRegs(op1);
if (op1->OperIs(GT_BITCAST) && op1->isContained())
{
GenTree* bitCastSrc = op1->gtGetOp1();
var_types srcType = bitCastSrc->TypeGet();
noway_assert(!bitCastSrc->isContained());
if (targetReg == REG_NA)
{
emit->emitIns_S_R(ins_Store(srcType, compiler->isSIMDTypeLocalAligned(lclNum)),
emitTypeSize(targetType), bitCastSrc->GetRegNum(), lclNum, 0);
genUpdateLife(lclNode);
varDsc->SetRegNum(REG_STK);
}
else
{
genBitCast(targetType, targetReg, srcType, bitCastSrc->GetRegNum());
}
}
else if (targetReg == REG_NA)
{
// stack store
emit->emitInsStoreLcl(ins_Store(targetType, compiler->isSIMDTypeLocalAligned(lclNum)),
emitTypeSize(targetType), lclNode);
varDsc->SetRegNum(REG_STK);
}
else
{
// Look for the case where we have a constant zero which we've marked for reuse,
// but which isn't actually in the register we want. In that case, it's better to create
// zero in the target register, because an xor is smaller than a copy. Note that we could
// potentially handle this in the register allocator, but we can't always catch it there
// because the target may not have a register allocated for it yet.
if (op1->isUsedFromReg() && (op1->GetRegNum() != targetReg) && (op1->IsIntegralConst(0) || op1->IsFPZero()))
{
op1->SetRegNum(REG_NA);
op1->ResetReuseRegVal();
op1->SetContained();
}
if (!op1->isUsedFromReg())
{
// Currently, we assume that the non-reg source of a GT_STORE_LCL_VAR writing to a register
// must be a constant. However, in the future we might want to support an operand used from
// memory. This is a bit tricky because we have to decide it can be used from memory before
// register allocation,
// and this would be a case where, once that's done, we need to mark that node as always
// requiring a register - which we always assume now anyway, but once we "optimize" that
// we'll have to take cases like this into account.
assert((op1->GetRegNum() == REG_NA) && op1->OperIsConst());
genSetRegToConst(targetReg, targetType, op1);
}
else
{
assert(targetReg == lclNode->GetRegNum());
assert(op1->GetRegNum() != REG_NA);
inst_Mov_Extend(targetType, /* srcInReg */ true, targetReg, op1->GetRegNum(), /* canSkip */ true,
emitTypeSize(targetType));
}
}
if (targetReg != REG_NA)
{
genProduceReg(lclNode);
}
}
}
//------------------------------------------------------------------------
// genCodeForIndexAddr: Produce code for a GT_INDEX_ADDR node.
//
// Arguments:
// tree - the GT_INDEX_ADDR node
//
void CodeGen::genCodeForIndexAddr(GenTreeIndexAddr* node)
{
GenTree* const base = node->Arr();
GenTree* const index = node->Index();
const regNumber baseReg = genConsumeReg(base);
regNumber indexReg = genConsumeReg(index);
const regNumber dstReg = node->GetRegNum();
// NOTE: `genConsumeReg` marks the consumed register as not a GC pointer, as it assumes that the input registers
// die at the first instruction generated by the node. This is not the case for `INDEX_ADDR`, however, as the
// base register is multiply-used. As such, we need to mark the base register as containing a GC pointer until
// we are finished generating the code for this node.
gcInfo.gcMarkRegPtrVal(baseReg, base->TypeGet());
assert(varTypeIsIntegral(index->TypeGet()));
regNumber tmpReg = REG_NA;
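    // On 64-bit targets a temp register is reserved up front; it is used to widen the array length or the
    // index, and to hold the scaled index when the element size is not directly encodable.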
#ifdef TARGET_64BIT
tmpReg = node->GetSingleTempReg();
#endif
// Generate the bounds check if necessary.
if ((node->gtFlags & GTF_INX_RNGCHK) != 0)
{
#ifdef TARGET_64BIT
// The CLI Spec allows an array to be indexed by either an int32 or a native int. In the case that the index
// is a native int on a 64-bit platform, we will need to widen the array length and then compare.
if (index->TypeGet() == TYP_I_IMPL)
{
GetEmitter()->emitIns_R_AR(INS_mov, EA_4BYTE, tmpReg, baseReg, static_cast<int>(node->gtLenOffset));
GetEmitter()->emitIns_R_R(INS_cmp, EA_8BYTE, indexReg, tmpReg);
}
else
#endif // TARGET_64BIT
{
GetEmitter()->emitIns_R_AR(INS_cmp, EA_4BYTE, indexReg, baseReg, static_cast<int>(node->gtLenOffset));
}
genJumpToThrowHlpBlk(EJ_jae, SCK_RNGCHK_FAIL, node->gtIndRngFailBB);
}
#ifdef TARGET_64BIT
if (index->TypeGet() != TYP_I_IMPL)
{
// LEA needs 64-bit operands so we need to widen the index if it's TYP_INT.
GetEmitter()->emitIns_Mov(INS_mov, EA_4BYTE, tmpReg, indexReg, /* canSkip */ false);
indexReg = tmpReg;
}
#endif // TARGET_64BIT
// Compute the address of the array element.
unsigned scale = node->gtElemSize;
switch (scale)
{
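        // Scale factors of 1, 2, 4 and 8 can be encoded directly in the LEA emitted below; any other
        // element size requires an explicit multiply into the temp register first.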
case 1:
case 2:
case 4:
case 8:
tmpReg = indexReg;
break;
default:
#ifdef TARGET_64BIT
// IMUL treats its immediate operand as signed so scale can't be larger than INT32_MAX.
// The VM doesn't allow such large array elements but let's be sure.
noway_assert(scale <= INT32_MAX);
#else // !TARGET_64BIT
tmpReg = node->GetSingleTempReg();
#endif // !TARGET_64BIT
GetEmitter()->emitIns_R_I(emitter::inst3opImulForReg(tmpReg), EA_PTRSIZE, indexReg,
static_cast<ssize_t>(scale));
scale = 1;
break;
}
GetEmitter()->emitIns_R_ARX(INS_lea, emitTypeSize(node->TypeGet()), dstReg, baseReg, tmpReg, scale,
static_cast<int>(node->gtElemOffset));
gcInfo.gcMarkRegSetNpt(base->gtGetRegMask());
genProduceReg(node);
}
//------------------------------------------------------------------------
// genCodeForIndir: Produce code for a GT_IND node.
//
// Arguments:
// tree - the GT_IND node
//
void CodeGen::genCodeForIndir(GenTreeIndir* tree)
{
assert(tree->OperIs(GT_IND));
#ifdef FEATURE_SIMD
// Handling of Vector3 type values loaded through indirection.
if (tree->TypeGet() == TYP_SIMD12)
{
genLoadIndTypeSIMD12(tree);
return;
}
#endif // FEATURE_SIMD
var_types targetType = tree->TypeGet();
emitter* emit = GetEmitter();
GenTree* addr = tree->Addr();
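    // An indirection off a TLS handle constant is emitted as a load relative to the FS segment rather
    // than through a register-based address.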
if (addr->IsCnsIntOrI() && addr->IsIconHandle(GTF_ICON_TLS_HDL))
{
noway_assert(EA_ATTR(genTypeSize(targetType)) == EA_PTRSIZE);
emit->emitIns_R_C(ins_Load(TYP_I_IMPL), EA_PTRSIZE, tree->GetRegNum(), FLD_GLOBAL_FS,
(int)addr->AsIntCon()->gtIconVal);
}
else
{
genConsumeAddress(addr);
emit->emitInsLoadInd(ins_Load(targetType), emitTypeSize(tree), tree->GetRegNum(), tree);
}
genProduceReg(tree);
}
//------------------------------------------------------------------------
// genCodeForStoreInd: Produce code for a GT_STOREIND node.
//
// Arguments:
// tree - the GT_STOREIND node
//
void CodeGen::genCodeForStoreInd(GenTreeStoreInd* tree)
{
assert(tree->OperIs(GT_STOREIND));
#ifdef FEATURE_SIMD
// Storing Vector3 of size 12 bytes through indirection
if (tree->TypeGet() == TYP_SIMD12)
{
genStoreIndTypeSIMD12(tree);
return;
}
#endif // FEATURE_SIMD
GenTree* data = tree->Data();
GenTree* addr = tree->Addr();
var_types targetType = tree->TypeGet();
assert(!varTypeIsFloating(targetType) || (genTypeSize(targetType) == genTypeSize(data->TypeGet())));
GCInfo::WriteBarrierForm writeBarrierForm = gcInfo.gcIsWriteBarrierCandidate(tree, data);
if (writeBarrierForm != GCInfo::WBF_NoBarrier)
{
// data and addr must be in registers.
// Consume both registers so that any copies of interfering registers are taken care of.
genConsumeOperands(tree);
if (genEmitOptimizedGCWriteBarrier(writeBarrierForm, addr, data))
{
return;
}
// At this point, we should not have any interference.
// That is, 'data' must not be in REG_ARG_0, as that is where 'addr' must go.
noway_assert(data->GetRegNum() != REG_ARG_0);
// addr goes in REG_ARG_0
genCopyRegIfNeeded(addr, REG_ARG_0);
// data goes in REG_ARG_1
genCopyRegIfNeeded(data, REG_ARG_1);
genGCWriteBarrier(tree, writeBarrierForm);
}
else
{
bool dataIsUnary = false;
bool isRMWMemoryOp = tree->IsRMWMemoryOp();
GenTree* rmwSrc = nullptr;
// We must consume the operands in the proper execution order, so that liveness is
// updated appropriately.
genConsumeAddress(addr);
        // If tree represents a RMW memory op then its data is a non-leaf node marked as contained
        // and the non-indir operand of data is the source of the RMW memory op.
if (isRMWMemoryOp)
{
assert(data->isContained() && !data->OperIsLeaf());
GenTree* rmwDst = nullptr;
dataIsUnary = (GenTree::OperIsUnary(data->OperGet()) != 0);
if (!dataIsUnary)
{
if (tree->IsRMWDstOp1())
{
rmwDst = data->gtGetOp1();
rmwSrc = data->gtGetOp2();
}
else
{
assert(tree->IsRMWDstOp2());
rmwDst = data->gtGetOp2();
rmwSrc = data->gtGetOp1();
}
genConsumeRegs(rmwSrc);
}
else
{
                // *(p) = oper *(p): Here addr = p, rmwSrc = rmwDst = *(p) i.e. GT_IND(p)
// For unary RMW ops, src and dst of RMW memory op is the same. Lower
// clears operand counts on rmwSrc and we don't need to perform a
// genConsumeReg() on it.
assert(tree->IsRMWDstOp1());
rmwSrc = data->gtGetOp1();
rmwDst = data->gtGetOp1();
assert(rmwSrc->isUsedFromMemory());
}
assert(rmwSrc != nullptr);
assert(rmwDst != nullptr);
assert(Lowering::IndirsAreEquivalent(rmwDst, tree));
}
else
{
genConsumeRegs(data);
}
if (isRMWMemoryOp)
{
if (dataIsUnary)
{
// generate code for unary RMW memory ops like neg/not
GetEmitter()->emitInsRMW(genGetInsForOper(data->OperGet(), data->TypeGet()), emitTypeSize(tree), tree);
}
else
{
if (data->OperIsShiftOrRotate())
{
// Generate code for shift RMW memory ops.
// The data address needs to be op1 (it must be [addr] = [addr] <shift> <amount>, not [addr] =
// <amount> <shift> [addr]).
assert(tree->IsRMWDstOp1());
assert(rmwSrc == data->gtGetOp2());
genCodeForShiftRMW(tree);
}
else if (data->OperGet() == GT_ADD && (rmwSrc->IsIntegralConst(1) || rmwSrc->IsIntegralConst(-1)))
{
// Generate "inc/dec [mem]" instead of "add/sub [mem], 1".
//
// Notes:
// 1) Global morph transforms GT_SUB(x, +/-1) into GT_ADD(x, -/+1).
// 2) TODO-AMD64: Debugger routine NativeWalker::Decode() runs into
// an assert while decoding ModR/M byte of "inc dword ptr [rax]".
// It is not clear whether Decode() can handle all possible
// addr modes with inc/dec. For this reason, inc/dec [mem]
// is not generated while generating debuggable code. Update
// the above if condition once Decode() routine is fixed.
assert(rmwSrc->isContainedIntOrIImmed());
instruction ins = rmwSrc->IsIntegralConst(1) ? INS_inc : INS_dec;
GetEmitter()->emitInsRMW(ins, emitTypeSize(tree), tree);
}
else
{
// generate code for remaining binary RMW memory ops like add/sub/and/or/xor
GetEmitter()->emitInsRMW(genGetInsForOper(data->OperGet(), data->TypeGet()), emitTypeSize(tree),
tree, rmwSrc);
}
}
}
else
{
GetEmitter()->emitInsStoreInd(ins_Store(data->TypeGet()), emitTypeSize(tree), tree);
}
}
}
//------------------------------------------------------------------------
// genCodeForSwap: Produce code for a GT_SWAP node.
//
// Arguments:
// tree - the GT_SWAP node
//
void CodeGen::genCodeForSwap(GenTreeOp* tree)
{
assert(tree->OperIs(GT_SWAP));
// Swap is only supported for lclVar operands that are enregistered
// We do not consume or produce any registers. Both operands remain enregistered.
// However, the gc-ness may change.
assert(genIsRegCandidateLocal(tree->gtOp1) && genIsRegCandidateLocal(tree->gtOp2));
GenTreeLclVarCommon* lcl1 = tree->gtOp1->AsLclVarCommon();
LclVarDsc* varDsc1 = compiler->lvaGetDesc(lcl1);
var_types type1 = varDsc1->TypeGet();
GenTreeLclVarCommon* lcl2 = tree->gtOp2->AsLclVarCommon();
LclVarDsc* varDsc2 = compiler->lvaGetDesc(lcl2);
var_types type2 = varDsc2->TypeGet();
// We must have both int or both fp regs
assert(!varTypeUsesFloatReg(type1) || varTypeUsesFloatReg(type2));
// FP swap is not yet implemented (and should have NYI'd in LSRA)
assert(!varTypeUsesFloatReg(type1));
regNumber oldOp1Reg = lcl1->GetRegNum();
regMaskTP oldOp1RegMask = genRegMask(oldOp1Reg);
regNumber oldOp2Reg = lcl2->GetRegNum();
regMaskTP oldOp2RegMask = genRegMask(oldOp2Reg);
// We don't call genUpdateVarReg because we don't have a tree node with the new register.
varDsc1->SetRegNum(oldOp2Reg);
varDsc2->SetRegNum(oldOp1Reg);
// Do the xchg
emitAttr size = EA_PTRSIZE;
if (varTypeGCtype(type1) != varTypeGCtype(type2))
{
// If the type specified to the emitter is a GC type, it will swap the GC-ness of the registers.
// Otherwise it will leave them alone, which is correct if they have the same GC-ness.
size = EA_GCREF;
}
inst_RV_RV(INS_xchg, oldOp1Reg, oldOp2Reg, TYP_I_IMPL, size);
// Update the gcInfo.
    // Manually remove these regs from the gc sets (mostly to avoid confusing duplicative dump output)
gcInfo.gcRegByrefSetCur &= ~(oldOp1RegMask | oldOp2RegMask);
gcInfo.gcRegGCrefSetCur &= ~(oldOp1RegMask | oldOp2RegMask);
// gcMarkRegPtrVal will do the appropriate thing for non-gc types.
// It will also dump the updates.
gcInfo.gcMarkRegPtrVal(oldOp2Reg, type1);
gcInfo.gcMarkRegPtrVal(oldOp1Reg, type2);
}
//------------------------------------------------------------------------
// genEmitOptimizedGCWriteBarrier: Generate write barrier store using the optimized
// helper functions.
//
// Arguments:
// writeBarrierForm - the write barrier form to use
// addr - the address at which to do the store
// data - the data to store
//
// Return Value:
// true if an optimized write barrier form was used, false if not. If this
// function returns false, the caller must emit a "standard" write barrier.
bool CodeGen::genEmitOptimizedGCWriteBarrier(GCInfo::WriteBarrierForm writeBarrierForm, GenTree* addr, GenTree* data)
{
assert(writeBarrierForm != GCInfo::WBF_NoBarrier);
#if defined(TARGET_X86) && NOGC_WRITE_BARRIERS
if (!genUseOptimizedWriteBarriers(writeBarrierForm))
{
return false;
}
const static int regToHelper[2][8] = {
// If the target is known to be in managed memory
{
CORINFO_HELP_ASSIGN_REF_EAX, // EAX
CORINFO_HELP_ASSIGN_REF_ECX, // ECX
-1, // EDX (always the target address)
CORINFO_HELP_ASSIGN_REF_EBX, // EBX
-1, // ESP
CORINFO_HELP_ASSIGN_REF_EBP, // EBP
CORINFO_HELP_ASSIGN_REF_ESI, // ESI
CORINFO_HELP_ASSIGN_REF_EDI, // EDI
},
// Don't know if the target is in managed memory
{
CORINFO_HELP_CHECKED_ASSIGN_REF_EAX, // EAX
CORINFO_HELP_CHECKED_ASSIGN_REF_ECX, // ECX
-1, // EDX (always the target address)
CORINFO_HELP_CHECKED_ASSIGN_REF_EBX, // EBX
-1, // ESP
CORINFO_HELP_CHECKED_ASSIGN_REF_EBP, // EBP
CORINFO_HELP_CHECKED_ASSIGN_REF_ESI, // ESI
CORINFO_HELP_CHECKED_ASSIGN_REF_EDI, // EDI
},
};
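    // Sanity-check that the table above agrees with the actual REG_* register numbering.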
noway_assert(regToHelper[0][REG_EAX] == CORINFO_HELP_ASSIGN_REF_EAX);
noway_assert(regToHelper[0][REG_ECX] == CORINFO_HELP_ASSIGN_REF_ECX);
noway_assert(regToHelper[0][REG_EBX] == CORINFO_HELP_ASSIGN_REF_EBX);
noway_assert(regToHelper[0][REG_ESP] == -1);
noway_assert(regToHelper[0][REG_EBP] == CORINFO_HELP_ASSIGN_REF_EBP);
noway_assert(regToHelper[0][REG_ESI] == CORINFO_HELP_ASSIGN_REF_ESI);
noway_assert(regToHelper[0][REG_EDI] == CORINFO_HELP_ASSIGN_REF_EDI);
noway_assert(regToHelper[1][REG_EAX] == CORINFO_HELP_CHECKED_ASSIGN_REF_EAX);
noway_assert(regToHelper[1][REG_ECX] == CORINFO_HELP_CHECKED_ASSIGN_REF_ECX);
noway_assert(regToHelper[1][REG_EBX] == CORINFO_HELP_CHECKED_ASSIGN_REF_EBX);
noway_assert(regToHelper[1][REG_ESP] == -1);
noway_assert(regToHelper[1][REG_EBP] == CORINFO_HELP_CHECKED_ASSIGN_REF_EBP);
noway_assert(regToHelper[1][REG_ESI] == CORINFO_HELP_CHECKED_ASSIGN_REF_ESI);
noway_assert(regToHelper[1][REG_EDI] == CORINFO_HELP_CHECKED_ASSIGN_REF_EDI);
regNumber reg = data->GetRegNum();
noway_assert((reg != REG_ESP) && (reg != REG_WRITE_BARRIER));
// Generate the following code:
// lea edx, addr
// call write_barrier_helper_reg
// addr goes in REG_ARG_0
genCopyRegIfNeeded(addr, REG_WRITE_BARRIER);
unsigned tgtAnywhere = 0;
if (writeBarrierForm != GCInfo::WBF_BarrierUnchecked)
{
tgtAnywhere = 1;
}
// We might want to call a modified version of genGCWriteBarrier() to get the benefit of
// the FEATURE_COUNT_GC_WRITE_BARRIERS code there, but that code doesn't look like it works
// with rationalized RyuJIT IR. So, for now, just emit the helper call directly here.
genEmitHelperCall(regToHelper[tgtAnywhere][reg],
0, // argSize
EA_PTRSIZE); // retSize
return true;
#else // !defined(TARGET_X86) || !NOGC_WRITE_BARRIERS
return false;
#endif // !defined(TARGET_X86) || !NOGC_WRITE_BARRIERS
}
// Produce code for a GT_CALL node
void CodeGen::genCall(GenTreeCall* call)
{
genAlignStackBeforeCall(call);
// all virtuals should have been expanded into a control expression
assert(!call->IsVirtual() || call->gtControlExpr || call->gtCallAddr);
// Insert a GS check if necessary
if (call->IsTailCallViaJitHelper())
{
if (compiler->getNeedsGSSecurityCookie())
{
#if FEATURE_FIXED_OUT_ARGS
// If either of the conditions below is true, we will need a temporary register in order to perform the GS
// cookie check. When FEATURE_FIXED_OUT_ARGS is disabled, we save and restore the temporary register using
// push/pop. When FEATURE_FIXED_OUT_ARGS is enabled, however, we need an alternative solution. For now,
// though, the tail prefix is ignored on all platforms that use fixed out args, so we should never hit this
// case.
assert(compiler->gsGlobalSecurityCookieAddr == nullptr);
assert((int)compiler->gsGlobalSecurityCookieVal == (ssize_t)compiler->gsGlobalSecurityCookieVal);
#endif
genEmitGSCookieCheck(true);
}
}
// Consume all the arg regs
for (GenTreeCall::Use& use : call->LateArgs())
{
GenTree* argNode = use.GetNode();
fgArgTabEntry* curArgTabEntry = compiler->gtArgEntryByNode(call, argNode->gtSkipReloadOrCopy());
assert(curArgTabEntry);
if (curArgTabEntry->GetRegNum() == REG_STK)
{
continue;
}
#ifdef UNIX_AMD64_ABI
// Deal with multi register passed struct args.
if (argNode->OperGet() == GT_FIELD_LIST)
{
unsigned regIndex = 0;
for (GenTreeFieldList::Use& use : argNode->AsFieldList()->Uses())
{
GenTree* putArgRegNode = use.GetNode();
assert(putArgRegNode->gtOper == GT_PUTARG_REG);
regNumber argReg = curArgTabEntry->GetRegNum(regIndex++);
genConsumeReg(putArgRegNode);
// Validate the putArgRegNode has the right type.
assert(varTypeUsesFloatReg(putArgRegNode->TypeGet()) == genIsValidFloatReg(argReg));
inst_Mov_Extend(putArgRegNode->TypeGet(), /* srcInReg */ false, argReg, putArgRegNode->GetRegNum(),
/* canSkip */ true, emitActualTypeSize(TYP_I_IMPL));
}
}
else
#endif // UNIX_AMD64_ABI
{
regNumber argReg = curArgTabEntry->GetRegNum();
genConsumeReg(argNode);
inst_Mov_Extend(argNode->TypeGet(), /* srcInReg */ false, argReg, argNode->GetRegNum(), /* canSkip */ true,
emitActualTypeSize(TYP_I_IMPL));
}
// In the case of a varargs call,
// the ABI dictates that if we have floating point args,
// we must pass the enregistered arguments in both the
        // integer and floating point registers, so let's do that.
if (compFeatureVarArg() && call->IsVarargs() && varTypeIsFloating(argNode))
{
regNumber srcReg = argNode->GetRegNum();
regNumber targetReg = compiler->getCallArgIntRegister(argNode->GetRegNum());
inst_Mov(TYP_LONG, targetReg, srcReg, /* canSkip */ false, emitActualTypeSize(TYP_I_IMPL));
}
}
#if defined(TARGET_X86) || defined(UNIX_AMD64_ABI)
// The call will pop its arguments.
// for each putarg_stk:
target_ssize_t stackArgBytes = 0;
for (GenTreeCall::Use& use : call->Args())
{
GenTree* arg = use.GetNode();
if (arg->OperIs(GT_PUTARG_STK) && ((arg->gtFlags & GTF_LATE_ARG) == 0))
{
GenTree* source = arg->AsPutArgStk()->gtGetOp1();
unsigned size = arg->AsPutArgStk()->GetStackByteSize();
stackArgBytes += size;
#ifdef DEBUG
fgArgTabEntry* curArgTabEntry = compiler->gtArgEntryByNode(call, arg);
assert(curArgTabEntry != nullptr);
assert(size == (curArgTabEntry->numSlots * TARGET_POINTER_SIZE));
#ifdef FEATURE_PUT_STRUCT_ARG_STK
if (!source->OperIs(GT_FIELD_LIST) && (source->TypeGet() == TYP_STRUCT))
{
GenTreeObj* obj = source->AsObj();
unsigned argBytes = roundUp(obj->GetLayout()->GetSize(), TARGET_POINTER_SIZE);
#ifdef TARGET_X86
// If we have an OBJ, we must have created a copy if the original arg was not a
// local and was not a multiple of TARGET_POINTER_SIZE.
// Note that on x64/ux this will be handled by unrolling in genStructPutArgUnroll.
assert((argBytes == obj->GetLayout()->GetSize()) || obj->Addr()->IsLocalAddrExpr());
#endif // TARGET_X86
assert((curArgTabEntry->numSlots * TARGET_POINTER_SIZE) == argBytes);
}
#endif // FEATURE_PUT_STRUCT_ARG_STK
#endif // DEBUG
}
}
#endif // defined(TARGET_X86) || defined(UNIX_AMD64_ABI)
// Insert a null check on "this" pointer if asked.
if (call->NeedsNullCheck())
{
const regNumber regThis = genGetThisArgReg(call);
GetEmitter()->emitIns_AR_R(INS_cmp, EA_4BYTE, regThis, regThis, 0);
}
// If fast tail call, then we are done here, we just have to load the call
// target into the right registers. We ensure in RA that the registers used
// for the target (e.g. contained indir) are loaded into volatile registers
// that won't be restored by epilog sequence.
if (call->IsFastTailCall())
{
GenTree* target = getCallTarget(call, nullptr);
if (target != nullptr)
{
if (target->isContainedIndir())
{
genConsumeAddress(target->AsIndir()->Addr());
}
else
{
assert(!target->isContained());
genConsumeReg(target);
}
}
return;
}
    // For a pinvoke to unmanaged code we emit a label to clear
// the GC pointer state before the callsite.
// We can't utilize the typical lazy killing of GC pointers
// at (or inside) the callsite.
if (compiler->killGCRefs(call))
{
genDefineTempLabel(genCreateTempLabel());
}
#if defined(DEBUG) && defined(TARGET_X86)
// Store the stack pointer so we can check it after the call.
if (compiler->opts.compStackCheckOnCall && call->gtCallType == CT_USER_FUNC)
{
noway_assert(compiler->lvaCallSpCheck != 0xCCCCCCCC &&
compiler->lvaGetDesc(compiler->lvaCallSpCheck)->lvDoNotEnregister &&
compiler->lvaGetDesc(compiler->lvaCallSpCheck)->lvOnFrame);
GetEmitter()->emitIns_S_R(ins_Store(TYP_I_IMPL), EA_PTRSIZE, REG_SPBASE, compiler->lvaCallSpCheck, 0);
}
#endif // defined(DEBUG) && defined(TARGET_X86)
// When it's a PInvoke call and the call type is USER function, we issue VZEROUPPER here
// if the function contains 256bit AVX instructions, this is to avoid AVX-256 to Legacy SSE
// transition penalty, assuming the user function contains legacy SSE instruction.
    // To limit the code size impact we only issue VZEROUPPER before the PInvoke call and do not issue
    // VZEROUPPER after it, because the transition penalty from legacy SSE to AVX only happens
    // when there is a preceding 256-bit AVX to legacy SSE transition.
if (call->IsPInvoke() && (call->gtCallType == CT_USER_FUNC) && GetEmitter()->Contains256bitAVX())
{
assert(compiler->canUseVexEncoding());
instGen(INS_vzeroupper);
}
genCallInstruction(call X86_ARG(stackArgBytes));
// for pinvoke/intrinsic/tailcalls we may have needed to get the address of
// a label. In case it is indirect with CFG enabled make sure we do not get
// the address after the validation but only after the actual call that
// comes after.
if (genPendingCallLabel && !call->IsHelperCall(compiler, CORINFO_HELP_VALIDATE_INDIRECT_CALL))
{
genDefineInlineTempLabel(genPendingCallLabel);
genPendingCallLabel = nullptr;
}
#ifdef DEBUG
// We should not have GC pointers in killed registers live around the call.
// GC info for arg registers were cleared when consuming arg nodes above
// and LSRA should ensure it for other trashed registers.
regMaskTP killMask = RBM_CALLEE_TRASH;
if (call->IsHelperCall())
{
CorInfoHelpFunc helpFunc = compiler->eeGetHelperNum(call->gtCallMethHnd);
killMask = compiler->compHelperCallKillSet(helpFunc);
}
assert((gcInfo.gcRegGCrefSetCur & killMask) == 0);
assert((gcInfo.gcRegByrefSetCur & killMask) == 0);
#endif
var_types returnType = call->TypeGet();
if (returnType != TYP_VOID)
{
#ifdef TARGET_X86
if (varTypeIsFloating(returnType))
{
// Spill the value from the fp stack.
// Then, load it into the target register.
call->gtFlags |= GTF_SPILL;
regSet.rsSpillFPStack(call);
call->gtFlags |= GTF_SPILLED;
call->gtFlags &= ~GTF_SPILL;
}
else
#endif // TARGET_X86
{
regNumber returnReg;
if (call->HasMultiRegRetVal())
{
const ReturnTypeDesc* retTypeDesc = call->GetReturnTypeDesc();
assert(retTypeDesc != nullptr);
const unsigned regCount = retTypeDesc->GetReturnRegCount();
// If regs allocated to call node are different from ABI return
// regs in which the call has returned its result, move the result
// to regs allocated to call node.
for (unsigned i = 0; i < regCount; ++i)
{
var_types regType = retTypeDesc->GetReturnRegType(i);
returnReg = retTypeDesc->GetABIReturnReg(i);
regNumber allocatedReg = call->GetRegNumByIdx(i);
inst_Mov(regType, allocatedReg, returnReg, /* canSkip */ true);
}
#ifdef FEATURE_SIMD
// A Vector3 return value is stored in xmm0 and xmm1.
// RyuJIT assumes that the upper unused bits of xmm1 are cleared but
// the native compiler doesn't guarantee it.
if (call->IsUnmanaged() && (returnType == TYP_SIMD12))
{
returnReg = retTypeDesc->GetABIReturnReg(1);
// Clear the upper 32 bits by two shift instructions.
// retReg = retReg << 96
// retReg = retReg >> 96
GetEmitter()->emitIns_R_I(INS_pslldq, emitActualTypeSize(TYP_SIMD12), returnReg, 12);
GetEmitter()->emitIns_R_I(INS_psrldq, emitActualTypeSize(TYP_SIMD12), returnReg, 12);
}
#endif // FEATURE_SIMD
}
else
{
#ifdef TARGET_X86
if (call->IsHelperCall(compiler, CORINFO_HELP_INIT_PINVOKE_FRAME))
{
// The x86 CORINFO_HELP_INIT_PINVOKE_FRAME helper uses a custom calling convention that returns with
// TCB in REG_PINVOKE_TCB. AMD64/ARM64 use the standard calling convention. fgMorphCall() sets the
// correct argument registers.
returnReg = REG_PINVOKE_TCB;
}
else
#endif // TARGET_X86
if (varTypeIsFloating(returnType))
{
returnReg = REG_FLOATRET;
}
else
{
returnReg = REG_INTRET;
}
inst_Mov(returnType, call->GetRegNum(), returnReg, /* canSkip */ true);
}
genProduceReg(call);
}
}
// If there is nothing next, that means the result is thrown away, so this value is not live.
// However, for minopts or debuggable code, we keep it live to support managed return value debugging.
if ((call->gtNext == nullptr) && compiler->opts.OptimizationEnabled())
{
gcInfo.gcMarkRegSetNpt(RBM_INTRET);
}
#if defined(DEBUG) && defined(TARGET_X86)
if (compiler->opts.compStackCheckOnCall && call->gtCallType == CT_USER_FUNC)
{
noway_assert(compiler->lvaCallSpCheck != 0xCCCCCCCC &&
compiler->lvaGetDesc(compiler->lvaCallSpCheck)->lvDoNotEnregister &&
compiler->lvaGetDesc(compiler->lvaCallSpCheck)->lvOnFrame);
if (!call->CallerPop() && (stackArgBytes != 0))
{
            // ECX is trashed, so it can be used to compute the expected SP. We saved the value of SP
            // after pushing all the stack arguments, but the callee has since popped them, so we need
            // to do some math to reconstruct the value to compare against.
GetEmitter()->emitIns_Mov(INS_mov, EA_4BYTE, REG_ARG_0, REG_SPBASE, /* canSkip */ false);
GetEmitter()->emitIns_R_I(INS_sub, EA_4BYTE, REG_ARG_0, stackArgBytes);
GetEmitter()->emitIns_S_R(INS_cmp, EA_4BYTE, REG_ARG_0, compiler->lvaCallSpCheck, 0);
}
else
{
GetEmitter()->emitIns_S_R(INS_cmp, EA_4BYTE, REG_SPBASE, compiler->lvaCallSpCheck, 0);
}
BasicBlock* sp_check = genCreateTempLabel();
GetEmitter()->emitIns_J(INS_je, sp_check);
instGen(INS_BREAKPOINT);
genDefineTempLabel(sp_check);
}
#endif // defined(DEBUG) && defined(TARGET_X86)
#if !defined(FEATURE_EH_FUNCLETS)
//-------------------------------------------------------------------------
// Create a label for tracking of region protected by the monitor in synchronized methods.
// This needs to be here, rather than above where fPossibleSyncHelperCall is set,
// so the GC state vars have been updated before creating the label.
if ((call->gtCallType == CT_HELPER) && (compiler->info.compFlags & CORINFO_FLG_SYNCH))
{
CorInfoHelpFunc helperNum = compiler->eeGetHelperNum(call->gtCallMethHnd);
noway_assert(helperNum != CORINFO_HELP_UNDEF);
switch (helperNum)
{
case CORINFO_HELP_MON_ENTER:
case CORINFO_HELP_MON_ENTER_STATIC:
noway_assert(compiler->syncStartEmitCookie == NULL);
compiler->syncStartEmitCookie =
GetEmitter()->emitAddLabel(gcInfo.gcVarPtrSetCur, gcInfo.gcRegGCrefSetCur, gcInfo.gcRegByrefSetCur);
noway_assert(compiler->syncStartEmitCookie != NULL);
break;
case CORINFO_HELP_MON_EXIT:
case CORINFO_HELP_MON_EXIT_STATIC:
noway_assert(compiler->syncEndEmitCookie == NULL);
compiler->syncEndEmitCookie =
GetEmitter()->emitAddLabel(gcInfo.gcVarPtrSetCur, gcInfo.gcRegGCrefSetCur, gcInfo.gcRegByrefSetCur);
noway_assert(compiler->syncEndEmitCookie != NULL);
break;
default:
break;
}
}
#endif // !FEATURE_EH_FUNCLETS
unsigned stackAdjustBias = 0;
#if defined(TARGET_X86)
// Is the caller supposed to pop the arguments?
if (call->CallerPop() && (stackArgBytes != 0))
{
stackAdjustBias = stackArgBytes;
}
SubtractStackLevel(stackArgBytes);
#endif // TARGET_X86
genRemoveAlignmentAfterCall(call, stackAdjustBias);
}
//------------------------------------------------------------------------
// genCallInstruction - Generate instructions necessary to transfer control to the call.
//
// Arguments:
// call - the GT_CALL node
//
// Remarks:
// For tailcalls this function will generate a jump.
//
void CodeGen::genCallInstruction(GenTreeCall* call X86_ARG(target_ssize_t stackArgBytes))
{
#if defined(TARGET_X86)
// If the callee pops the arguments, we pass a positive value as the argSize, and the emitter will
// adjust its stack level accordingly.
// If the caller needs to explicitly pop its arguments, we must pass a negative value, and then do the
// pop when we're done.
target_ssize_t argSizeForEmitter = stackArgBytes;
if (call->CallerPop())
{
argSizeForEmitter = -stackArgBytes;
}
#endif // defined(TARGET_X86)
// Determine return value size(s).
const ReturnTypeDesc* retTypeDesc = call->GetReturnTypeDesc();
emitAttr retSize = EA_PTRSIZE;
emitAttr secondRetSize = EA_UNKNOWN;
if (call->HasMultiRegRetVal())
{
retSize = emitTypeSize(retTypeDesc->GetReturnRegType(0));
secondRetSize = emitTypeSize(retTypeDesc->GetReturnRegType(1));
}
else
{
assert(!varTypeIsStruct(call));
if (call->gtType == TYP_REF)
{
retSize = EA_GCREF;
}
else if (call->gtType == TYP_BYREF)
{
retSize = EA_BYREF;
}
}
// We need to propagate the IL offset information to the call instruction, so we can emit
// an IL to native mapping record for the call, to support managed return value debugging.
// We don't want tail call helper calls that were converted from normal calls to get a record,
// so we skip this hash table lookup logic in that case.
DebugInfo di;
if (compiler->opts.compDbgInfo && compiler->genCallSite2DebugInfoMap != nullptr && !call->IsTailCall())
{
(void)compiler->genCallSite2DebugInfoMap->Lookup(call, &di);
}
CORINFO_SIG_INFO* sigInfo = nullptr;
#ifdef DEBUG
// Pass the call signature information down into the emitter so the emitter can associate
// native call sites with the signatures they were generated from.
if (call->gtCallType != CT_HELPER)
{
sigInfo = call->callSig;
}
#endif // DEBUG
CORINFO_METHOD_HANDLE methHnd;
GenTree* target = getCallTarget(call, &methHnd);
if (target != nullptr)
{
#ifdef TARGET_X86
if (call->IsVirtualStub() && (call->gtCallType == CT_INDIRECT))
{
// On x86, we need to generate a very specific pattern for indirect VSD calls:
//
// 3-byte nop
// call dword ptr [eax]
//
// Where EAX is also used as an argument to the stub dispatch helper. Make
// sure that the call target address is computed into EAX in this case.
assert(compiler->virtualStubParamInfo->GetReg() == REG_VIRTUAL_STUB_TARGET);
assert(target->isContainedIndir());
assert(target->OperGet() == GT_IND);
GenTree* addr = target->AsIndir()->Addr();
assert(addr->isUsedFromReg());
genConsumeReg(addr);
genCopyRegIfNeeded(addr, REG_VIRTUAL_STUB_TARGET);
GetEmitter()->emitIns_Nop(3);
// clang-format off
GetEmitter()->emitIns_Call(emitter::EC_INDIR_ARD,
methHnd,
INDEBUG_LDISASM_COMMA(sigInfo)
nullptr,
argSizeForEmitter,
retSize
MULTIREG_HAS_SECOND_GC_RET_ONLY_ARG(secondRetSize),
gcInfo.gcVarPtrSetCur,
gcInfo.gcRegGCrefSetCur,
gcInfo.gcRegByrefSetCur,
di, REG_VIRTUAL_STUB_TARGET, REG_NA, 1, 0);
// clang-format on
}
else
#endif
if (target->isContainedIndir())
{
// When CFG is enabled we should not be emitting any non-register indirect calls.
assert(!compiler->opts.IsCFGEnabled() ||
call->IsHelperCall(compiler, CORINFO_HELP_VALIDATE_INDIRECT_CALL) ||
call->IsHelperCall(compiler, CORINFO_HELP_DISPATCH_INDIRECT_CALL));
if (target->AsIndir()->HasBase() && target->AsIndir()->Base()->isContainedIntOrIImmed())
{
// Note that if gtControlExpr is an indir of an absolute address, we mark it as
// contained only if it can be encoded as PC-relative offset.
assert(target->AsIndir()->Base()->AsIntConCommon()->FitsInAddrBase(compiler));
// clang-format off
genEmitCall(emitter::EC_FUNC_TOKEN_INDIR,
methHnd,
INDEBUG_LDISASM_COMMA(sigInfo)
(void*) target->AsIndir()->Base()->AsIntConCommon()->IconValue()
X86_ARG(argSizeForEmitter),
retSize
MULTIREG_HAS_SECOND_GC_RET_ONLY_ARG(secondRetSize),
di,
REG_NA,
call->IsFastTailCall());
// clang-format on
}
else
{
// For fast tailcalls this is happening in epilog, so we should
// have already consumed target in genCall.
if (!call->IsFastTailCall())
{
genConsumeAddress(target->AsIndir()->Addr());
}
// clang-format off
genEmitCallIndir(emitter::EC_INDIR_ARD,
methHnd,
INDEBUG_LDISASM_COMMA(sigInfo)
target->AsIndir()
X86_ARG(argSizeForEmitter),
retSize
MULTIREG_HAS_SECOND_GC_RET_ONLY_ARG(secondRetSize),
di,
call->IsFastTailCall());
// clang-format on
}
}
else
{
// We have already generated code for gtControlExpr evaluating it into a register.
// We just need to emit "call reg" in this case.
assert(genIsValidIntReg(target->GetRegNum()));
// For fast tailcalls this is happening in epilog, so we should
// have already consumed target in genCall.
if (!call->IsFastTailCall())
{
genConsumeReg(target);
}
// clang-format off
genEmitCall(emitter::EC_INDIR_R,
methHnd,
INDEBUG_LDISASM_COMMA(sigInfo)
nullptr // addr
X86_ARG(argSizeForEmitter),
retSize
MULTIREG_HAS_SECOND_GC_RET_ONLY_ARG(secondRetSize),
di,
target->GetRegNum(),
call->IsFastTailCall());
// clang-format on
}
}
else
{
// If we have no target and this is a call with indirection cell
// then emit call through that indir cell. This means we generate e.g.
// lea r11, [addr of cell]
// call [r11]
        // which is more efficient than
// lea r11, [addr of cell]
// call [addr of cell]
regNumber indirCellReg = getCallIndirectionCellReg(call);
if (indirCellReg != REG_NA)
{
// clang-format off
GetEmitter()->emitIns_Call(
emitter::EC_INDIR_ARD,
methHnd,
INDEBUG_LDISASM_COMMA(sigInfo)
nullptr,
0,
retSize
MULTIREG_HAS_SECOND_GC_RET_ONLY_ARG(secondRetSize),
gcInfo.gcVarPtrSetCur,
gcInfo.gcRegGCrefSetCur,
gcInfo.gcRegByrefSetCur,
di, indirCellReg, REG_NA, 0, 0,
call->IsFastTailCall());
// clang-format on
}
#ifdef FEATURE_READYTORUN
else if (call->gtEntryPoint.addr != nullptr)
{
emitter::EmitCallType type =
(call->gtEntryPoint.accessType == IAT_VALUE) ? emitter::EC_FUNC_TOKEN : emitter::EC_FUNC_TOKEN_INDIR;
// clang-format off
genEmitCall(type,
methHnd,
INDEBUG_LDISASM_COMMA(sigInfo)
(void*)call->gtEntryPoint.addr
X86_ARG(argSizeForEmitter),
retSize
MULTIREG_HAS_SECOND_GC_RET_ONLY_ARG(secondRetSize),
di,
REG_NA,
call->IsFastTailCall());
// clang-format on
}
#endif
else
{
// Generate a direct call to a non-virtual user defined or helper method
assert(call->gtCallType == CT_HELPER || call->gtCallType == CT_USER_FUNC);
void* addr = nullptr;
if (call->gtCallType == CT_HELPER)
{
// Direct call to a helper method.
CorInfoHelpFunc helperNum = compiler->eeGetHelperNum(methHnd);
noway_assert(helperNum != CORINFO_HELP_UNDEF);
void* pAddr = nullptr;
addr = compiler->compGetHelperFtn(helperNum, (void**)&pAddr);
assert(pAddr == nullptr);
}
else
{
// Direct call to a non-virtual user function.
addr = call->gtDirectCallAddress;
}
assert(addr != nullptr);
// Non-virtual direct calls to known addresses
// clang-format off
genEmitCall(emitter::EC_FUNC_TOKEN,
methHnd,
INDEBUG_LDISASM_COMMA(sigInfo)
addr
X86_ARG(argSizeForEmitter),
retSize
MULTIREG_HAS_SECOND_GC_RET_ONLY_ARG(secondRetSize),
di,
REG_NA,
call->IsFastTailCall());
// clang-format on
}
}
}
// Produce code for a GT_JMP node.
// The arguments of the caller need to be transferred to the callee before exiting caller.
// The actual jump to callee is generated as part of caller epilog sequence.
// Therefore the codegen of GT_JMP is to ensure that the callee arguments are correctly set up.
void CodeGen::genJmpMethod(GenTree* jmp)
{
assert(jmp->OperGet() == GT_JMP);
assert(compiler->compJmpOpUsed);
// If no arguments, nothing to do
if (compiler->info.compArgsCount == 0)
{
return;
}
// Make sure register arguments are in their initial registers
// and stack arguments are put back as well.
unsigned varNum;
LclVarDsc* varDsc;
// First move any en-registered stack arguments back to the stack.
// At the same time any reg arg not in correct reg is moved back to its stack location.
//
// We are not strictly required to spill reg args that are not in the desired reg for a jmp call
// But that would require us to deal with circularity while moving values around. Spilling
// to stack makes the implementation simple, which is not a bad trade off given Jmp calls
// are not frequent.
for (varNum = 0; varNum < compiler->info.compArgsCount; varNum++)
{
varDsc = compiler->lvaGetDesc(varNum);
if (varDsc->lvPromoted)
{
noway_assert(varDsc->lvFieldCnt == 1); // We only handle one field here
unsigned fieldVarNum = varDsc->lvFieldLclStart;
varDsc = compiler->lvaGetDesc(fieldVarNum);
}
noway_assert(varDsc->lvIsParam);
if (varDsc->lvIsRegArg && (varDsc->GetRegNum() != REG_STK))
{
            // Skip reg args which are already in the right register for the jmp call.
            // If not, we will spill such args to their stack locations.
//
// If we need to generate a tail call profiler hook, then spill all
// arg regs to free them up for the callback.
if (!compiler->compIsProfilerHookNeeded() && (varDsc->GetRegNum() == varDsc->GetArgReg()))
{
continue;
}
}
else if (varDsc->GetRegNum() == REG_STK)
{
            // Skip args which are currently living on the stack.
continue;
}
// If we came here it means either a reg argument not in the right register or
// a stack argument currently living in a register. In either case the following
// assert should hold.
assert(varDsc->GetRegNum() != REG_STK);
assert(!varDsc->lvIsStructField || (compiler->lvaGetDesc(varDsc->lvParentLcl)->lvFieldCnt == 1));
var_types storeType = varDsc->GetActualRegisterType(); // We own the memory and can use the full move.
GetEmitter()->emitIns_S_R(ins_Store(storeType), emitTypeSize(storeType), varDsc->GetRegNum(), varNum, 0);
// Update lvRegNum life and GC info to indicate lvRegNum is dead and varDsc stack slot is going live.
// Note that we cannot modify varDsc->GetRegNum() here because another basic block may not be expecting it.
// Therefore manually update life of varDsc->GetRegNum().
regMaskTP tempMask = varDsc->lvRegMask();
regSet.RemoveMaskVars(tempMask);
gcInfo.gcMarkRegSetNpt(tempMask);
if (compiler->lvaIsGCTracked(varDsc))
{
#ifdef DEBUG
if (!VarSetOps::IsMember(compiler, gcInfo.gcVarPtrSetCur, varDsc->lvVarIndex))
{
JITDUMP("\t\t\t\t\t\t\tVar V%02u becoming live\n", varNum);
}
else
{
JITDUMP("\t\t\t\t\t\t\tVar V%02u continuing live\n", varNum);
}
#endif // DEBUG
VarSetOps::AddElemD(compiler, gcInfo.gcVarPtrSetCur, varDsc->lvVarIndex);
}
}
#ifdef PROFILING_SUPPORTED
// At this point all arg regs are free.
// Emit tail call profiler callback.
genProfilingLeaveCallback(CORINFO_HELP_PROF_FCN_TAILCALL);
#endif
    // Next, move any register arguments that are not already in their registers back into them
    // (loading from their stack homes).
regMaskTP fixedIntArgMask = RBM_NONE; // tracks the int arg regs occupying fixed args in case of a vararg method.
unsigned firstArgVarNum = BAD_VAR_NUM; // varNum of the first argument in case of a vararg method.
for (varNum = 0; varNum < compiler->info.compArgsCount; varNum++)
{
varDsc = compiler->lvaGetDesc(varNum);
if (varDsc->lvPromoted)
{
noway_assert(varDsc->lvFieldCnt == 1); // We only handle one field here
unsigned fieldVarNum = varDsc->lvFieldLclStart;
varDsc = compiler->lvaGetDesc(fieldVarNum);
}
noway_assert(varDsc->lvIsParam);
// Skip if arg not passed in a register.
if (!varDsc->lvIsRegArg)
{
continue;
}
#if defined(UNIX_AMD64_ABI)
if (varTypeIsStruct(varDsc))
{
CORINFO_CLASS_HANDLE typeHnd = varDsc->GetStructHnd();
assert(typeHnd != nullptr);
SYSTEMV_AMD64_CORINFO_STRUCT_REG_PASSING_DESCRIPTOR structDesc;
compiler->eeGetSystemVAmd64PassStructInRegisterDescriptor(typeHnd, &structDesc);
assert(structDesc.passedInRegisters);
unsigned __int8 offset0 = 0;
unsigned __int8 offset1 = 0;
var_types type0 = TYP_UNKNOWN;
var_types type1 = TYP_UNKNOWN;
// Get the eightbyte data
compiler->GetStructTypeOffset(structDesc, &type0, &type1, &offset0, &offset1);
// Move the values into the right registers.
//
// Update varDsc->GetArgReg() and lvOtherArgReg life and GC Info to indicate varDsc stack slot is dead and
// argReg is going live. Note that we cannot modify varDsc->GetRegNum() and lvOtherArgReg here
// because another basic block may not be expecting it.
// Therefore manually update life of argReg. Note that GT_JMP marks
// the end of the basic block and after which reg life and gc info will be recomputed for the new block in
// genCodeForBBList().
if (type0 != TYP_UNKNOWN)
{
GetEmitter()->emitIns_R_S(ins_Load(type0), emitTypeSize(type0), varDsc->GetArgReg(), varNum, offset0);
regSet.SetMaskVars(regSet.GetMaskVars() | genRegMask(varDsc->GetArgReg()));
gcInfo.gcMarkRegPtrVal(varDsc->GetArgReg(), type0);
}
if (type1 != TYP_UNKNOWN)
{
GetEmitter()->emitIns_R_S(ins_Load(type1), emitTypeSize(type1), varDsc->GetOtherArgReg(), varNum,
offset1);
regSet.SetMaskVars(regSet.GetMaskVars() | genRegMask(varDsc->GetOtherArgReg()));
gcInfo.gcMarkRegPtrVal(varDsc->GetOtherArgReg(), type1);
}
if (varDsc->lvTracked)
{
VarSetOps::RemoveElemD(compiler, gcInfo.gcVarPtrSetCur, varDsc->lvVarIndex);
}
}
else
#endif // !defined(UNIX_AMD64_ABI)
{
// Register argument
CLANG_FORMAT_COMMENT_ANCHOR;
#ifdef TARGET_X86
noway_assert(
isRegParamType(genActualType(varDsc->TypeGet())) ||
(varTypeIsStruct(varDsc->TypeGet()) && compiler->isTrivialPointerSizedStruct(varDsc->GetStructHnd())));
#else
noway_assert(isRegParamType(genActualType(varDsc->TypeGet())));
#endif // TARGET_X86
// Is register argument already in the right register?
// If not load it from its stack location.
var_types loadType = varDsc->GetRegisterType();
#ifdef TARGET_X86
if (varTypeIsStruct(varDsc->TypeGet()))
{
// Treat trivial pointer-sized structs as a pointer sized primitive
// for the purposes of registers.
loadType = TYP_I_IMPL;
}
#endif
regNumber argReg = varDsc->GetArgReg(); // incoming arg register
if (varDsc->GetRegNum() != argReg)
{
assert(genIsValidReg(argReg));
GetEmitter()->emitIns_R_S(ins_Load(loadType), emitTypeSize(loadType), argReg, varNum, 0);
// Update argReg life and GC Info to indicate varDsc stack slot is dead and argReg is going live.
// Note that we cannot modify varDsc->GetRegNum() here because another basic block may not be
// expecting it. Therefore manually update life of argReg. Note that GT_JMP marks the end of the
// basic block and after which reg life and gc info will be recomputed for the new block in
// genCodeForBBList().
regSet.AddMaskVars(genRegMask(argReg));
gcInfo.gcMarkRegPtrVal(argReg, loadType);
if (compiler->lvaIsGCTracked(varDsc))
{
#ifdef DEBUG
if (VarSetOps::IsMember(compiler, gcInfo.gcVarPtrSetCur, varDsc->lvVarIndex))
{
JITDUMP("\t\t\t\t\t\t\tVar V%02u becoming dead\n", varNum);
}
else
{
JITDUMP("\t\t\t\t\t\t\tVar V%02u continuing dead\n", varNum);
}
#endif // DEBUG
VarSetOps::RemoveElemD(compiler, gcInfo.gcVarPtrSetCur, varDsc->lvVarIndex);
}
}
}
#if defined(TARGET_AMD64)
// In case of a jmp call to a vararg method also pass the float/double arg in the corresponding int arg
// register. This is due to the AMD64 ABI which requires floating point values passed to varargs functions to
// be passed in both integer and floating point registers. It doesn't apply to x86, which passes floating point
// values on the stack.
if (compFeatureVarArg() && compiler->info.compIsVarArgs)
{
regNumber intArgReg;
var_types loadType = varDsc->GetRegisterType();
regNumber argReg = varDsc->GetArgReg(); // incoming arg register
if (varTypeIsFloating(loadType))
{
intArgReg = compiler->getCallArgIntRegister(argReg);
inst_Mov(TYP_LONG, intArgReg, argReg, /* canSkip */ false, emitActualTypeSize(loadType));
}
else
{
intArgReg = argReg;
}
fixedIntArgMask |= genRegMask(intArgReg);
if (intArgReg == REG_ARG_0)
{
assert(firstArgVarNum == BAD_VAR_NUM);
firstArgVarNum = varNum;
}
}
#endif // TARGET_AMD64
}
#if defined(TARGET_AMD64)
// Jmp call to a vararg method - if the method has fewer than 4 fixed arguments,
// load the remaining arg registers (both int and float) from the corresponding
    // shadow stack slots. This is because we don't know the number and types of the non-fixed params
    // passed by the caller, so we have to assume the worst case of the caller passing float/double
    // args in both the int and float arg regs.
//
// This doesn't apply to x86, which doesn't pass floating point values in floating
// point registers.
//
// The caller could have passed gc-ref/byref type var args. Since these are var args
    // the callee has no way of knowing their gc-ness. Therefore, mark the region that loads
// remaining arg registers from shadow stack slots as non-gc interruptible.
if (compFeatureVarArg() && fixedIntArgMask != RBM_NONE)
{
assert(compiler->info.compIsVarArgs);
assert(firstArgVarNum != BAD_VAR_NUM);
regMaskTP remainingIntArgMask = RBM_ARG_REGS & ~fixedIntArgMask;
if (remainingIntArgMask != RBM_NONE)
{
GetEmitter()->emitDisableGC();
for (int argNum = 0, argOffset = 0; argNum < MAX_REG_ARG; ++argNum)
{
regNumber argReg = intArgRegs[argNum];
regMaskTP argRegMask = genRegMask(argReg);
if ((remainingIntArgMask & argRegMask) != 0)
{
remainingIntArgMask &= ~argRegMask;
GetEmitter()->emitIns_R_S(INS_mov, EA_8BYTE, argReg, firstArgVarNum, argOffset);
// also load it in corresponding float arg reg
regNumber floatReg = compiler->getCallArgFloatRegister(argReg);
inst_Mov(TYP_DOUBLE, floatReg, argReg, /* canSkip */ false, emitActualTypeSize(TYP_I_IMPL));
}
argOffset += REGSIZE_BYTES;
}
GetEmitter()->emitEnableGC();
}
}
#endif // TARGET_AMD64
}
// produce code for a GT_LEA subnode
void CodeGen::genLeaInstruction(GenTreeAddrMode* lea)
{
emitAttr size = emitTypeSize(lea);
genConsumeOperands(lea);
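    // Fold whatever combination of base, index, scale and offset the address mode provides into a single LEA.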
if (lea->Base() && lea->Index())
{
regNumber baseReg = lea->Base()->GetRegNum();
regNumber indexReg = lea->Index()->GetRegNum();
GetEmitter()->emitIns_R_ARX(INS_lea, size, lea->GetRegNum(), baseReg, indexReg, lea->gtScale, lea->Offset());
}
else if (lea->Base())
{
GetEmitter()->emitIns_R_AR(INS_lea, size, lea->GetRegNum(), lea->Base()->GetRegNum(), lea->Offset());
}
else if (lea->Index())
{
GetEmitter()->emitIns_R_ARX(INS_lea, size, lea->GetRegNum(), REG_NA, lea->Index()->GetRegNum(), lea->gtScale,
lea->Offset());
}
genProduceReg(lea);
}
//------------------------------------------------------------------------
// genCompareFloat: Generate code for comparing two floating point values
//
// Arguments:
// treeNode - the compare tree
//
void CodeGen::genCompareFloat(GenTree* treeNode)
{
assert(treeNode->OperIsCompare());
GenTreeOp* tree = treeNode->AsOp();
GenTree* op1 = tree->gtOp1;
GenTree* op2 = tree->gtOp2;
var_types op1Type = op1->TypeGet();
var_types op2Type = op2->TypeGet();
genConsumeOperands(tree);
assert(varTypeIsFloating(op1Type));
assert(op1Type == op2Type);
regNumber targetReg = treeNode->GetRegNum();
instruction ins;
emitAttr cmpAttr;
GenCondition condition = GenCondition::FromFloatRelop(treeNode);
if (condition.PreferSwap())
{
condition = GenCondition::Swap(condition);
std::swap(op1, op2);
}
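    // ucomiss/ucomisd perform an unordered compare and set ZF/PF/CF; NaN operands are handled by the
    // condition selected below.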
ins = (op1Type == TYP_FLOAT) ? INS_ucomiss : INS_ucomisd;
cmpAttr = emitTypeSize(op1Type);
var_types targetType = treeNode->TypeGet();
// Clear target reg in advance via "xor reg,reg" to avoid movzx after SETCC
if ((targetReg != REG_NA) && (op1->GetRegNum() != targetReg) && (op2->GetRegNum() != targetReg) &&
!varTypeIsByte(targetType))
{
regMaskTP targetRegMask = genRegMask(targetReg);
if (((op1->gtGetContainedRegMask() | op2->gtGetContainedRegMask()) & targetRegMask) == 0)
{
instGen_Set_Reg_To_Zero(emitTypeSize(TYP_I_IMPL), targetReg);
targetType = TYP_BOOL; // just a tip for inst_SETCC that movzx is not needed
}
}
GetEmitter()->emitInsBinary(ins, cmpAttr, op1, op2);
// Are we evaluating this into a register?
if (targetReg != REG_NA)
{
if ((condition.GetCode() == GenCondition::FNEU) && (op1->GetRegNum() == op2->GetRegNum()))
{
// For floating point, `x != x` is a common way of
// checking for NaN. So, in the case where both
// operands are the same, we can optimize codegen
// to only do a single check.
condition = GenCondition(GenCondition::P);
}
inst_SETCC(condition, targetType, targetReg);
genProduceReg(tree);
}
}
//------------------------------------------------------------------------
// genCompareInt: Generate code for comparing ints or, on amd64, longs.
//
// Arguments:
// treeNode - the compare tree
//
// Return Value:
// None.
void CodeGen::genCompareInt(GenTree* treeNode)
{
assert(treeNode->OperIsCompare() || treeNode->OperIs(GT_CMP));
GenTreeOp* tree = treeNode->AsOp();
GenTree* op1 = tree->gtOp1;
GenTree* op2 = tree->gtOp2;
var_types op1Type = op1->TypeGet();
var_types op2Type = op2->TypeGet();
regNumber targetReg = tree->GetRegNum();
emitter* emit = GetEmitter();
bool canReuseFlags = false;
genConsumeOperands(tree);
assert(!op1->isContainedIntOrIImmed());
assert(!varTypeIsFloating(op2Type));
instruction ins;
var_types type = TYP_UNKNOWN;
if (tree->OperIs(GT_TEST_EQ, GT_TEST_NE))
{
ins = INS_test;
        // Unlike many xarch instructions, TEST doesn't have a form with a 16/32/64 bit first operand and
        // an 8 bit immediate second operand. But if the immediate value fits in 8 bits then we can simply
        // emit an 8 bit TEST instruction, unless we're targeting x86 and the first operand is a non-byteable
        // register.
        // Note that lowering does something similar but its main purpose is to allow memory operands to be
        // contained, so it doesn't handle other kinds of operands. It could do more but on x86 that results
        // in additional register constraints and that may be worse than wasting 3 bytes on an immediate.
if (
#ifdef TARGET_X86
(!op1->isUsedFromReg() || isByteReg(op1->GetRegNum())) &&
#endif
(op2->IsCnsIntOrI() && FitsIn<uint8_t>(op2->AsIntCon()->IconValue())))
{
type = TYP_UBYTE;
}
}
else if (op1->isUsedFromReg() && op2->IsIntegralConst(0))
{
if (compiler->opts.OptimizationEnabled())
{
emitAttr op1Size = emitActualTypeSize(op1->TypeGet());
assert((int)op1Size >= 4);
// Optimize "x<0" and "x>=0" to "x>>31" if "x" is not a jump condition and in a reg.
// Morph/Lowering are responsible to rotate "0<x" to "x>0" so we won't handle it here.
if ((targetReg != REG_NA) && tree->OperIs(GT_LT, GT_GE) && !tree->IsUnsigned())
{
inst_Mov(op1->TypeGet(), targetReg, op1->GetRegNum(), /* canSkip */ true);
if (tree->OperIs(GT_GE))
{
// emit "not" for "x>=0" case
inst_RV(INS_not, targetReg, op1->TypeGet());
}
inst_RV_IV(INS_shr_N, targetReg, (int)op1Size * 8 - 1, op1Size);
genProduceReg(tree);
return;
}
canReuseFlags = true;
}
// We're comparing a register to 0 so we can generate "test reg1, reg1"
// instead of the longer "cmp reg1, 0"
ins = INS_test;
op2 = op1;
}
else
{
ins = INS_cmp;
}
if (type == TYP_UNKNOWN)
{
if (op1Type == op2Type)
{
type = op1Type;
}
else if (genTypeSize(op1Type) == genTypeSize(op2Type))
{
// If the types are different but have the same size then we'll use TYP_INT or TYP_LONG.
// This primarily deals with small type mixes (e.g. byte/ubyte) that need to be widened
// and compared as int. We should not get long type mixes here but handle that as well
// just in case.
type = genTypeSize(op1Type) == 8 ? TYP_LONG : TYP_INT;
}
else
{
            // If the types are different simply use TYP_INT. This deals with small type/int type
// mixes (e.g. byte/short ubyte/int) that need to be widened and compared as int.
// Lowering is expected to handle any mixes that involve long types (e.g. int/long).
type = TYP_INT;
}
// The common type cannot be smaller than any of the operand types, we're probably mixing int/long
assert(genTypeSize(type) >= max(genTypeSize(op1Type), genTypeSize(op2Type)));
// Small unsigned int types (TYP_BOOL can use anything) should use unsigned comparisons
assert(!(varTypeIsSmallInt(type) && varTypeIsUnsigned(type)) || ((tree->gtFlags & GTF_UNSIGNED) != 0));
        // If op1 is smaller than the common type then it cannot be in memory, we're probably missing a cast
assert((genTypeSize(op1Type) >= genTypeSize(type)) || !op1->isUsedFromMemory());
        // If op2 is smaller than the common type then it cannot be in memory, we're probably missing a cast
assert((genTypeSize(op2Type) >= genTypeSize(type)) || !op2->isUsedFromMemory());
// If we ended up with a small type and op2 is a constant then make sure we don't lose constant bits
assert(!op2->IsCnsIntOrI() || !varTypeIsSmall(type) || FitsIn(type, op2->AsIntCon()->IconValue()));
}
// The type cannot be larger than the machine word size
assert(genTypeSize(type) <= genTypeSize(TYP_I_IMPL));
// TYP_UINT and TYP_ULONG should not appear here, only small types can be unsigned
assert(!varTypeIsUnsigned(type) || varTypeIsSmall(type));
    // The sign-jump optimization flag should only be set by the check below
assert((tree->gtFlags & GTF_RELOP_SJUMP_OPT) == 0);
var_types targetType = tree->TypeGet();
if (canReuseFlags && emit->AreFlagsSetToZeroCmp(op1->GetRegNum(), emitTypeSize(type), tree->OperGet()))
{
JITDUMP("Not emitting compare due to flags being already set\n");
}
else if (canReuseFlags && emit->AreFlagsSetForSignJumpOpt(op1->GetRegNum(), emitTypeSize(type), tree))
{
JITDUMP("Not emitting compare due to sign being already set, follow up instr will transform jump\n");
tree->gtFlags |= GTF_RELOP_SJUMP_OPT;
}
else
{
// Clear target reg in advance via "xor reg,reg" to avoid movzx after SETCC
if ((targetReg != REG_NA) && (op1->GetRegNum() != targetReg) && (op2->GetRegNum() != targetReg) &&
!varTypeIsByte(targetType))
{
regMaskTP targetRegMask = genRegMask(targetReg);
if (((op1->gtGetContainedRegMask() | op2->gtGetContainedRegMask()) & targetRegMask) == 0)
{
instGen_Set_Reg_To_Zero(emitTypeSize(TYP_I_IMPL), targetReg);
targetType = TYP_BOOL; // just a tip for inst_SETCC that movzx is not needed
}
}
emit->emitInsBinary(ins, emitTypeSize(type), op1, op2);
}
// Are we evaluating this into a register?
if (targetReg != REG_NA)
{
inst_SETCC(GenCondition::FromIntegralRelop(tree), targetType, targetReg);
genProduceReg(tree);
}
}
#if !defined(TARGET_64BIT)
//------------------------------------------------------------------------
// genLongToIntCast: Generate code for long to int casts on x86.
//
// Arguments:
// cast - The GT_CAST node
//
// Return Value:
// None.
//
// Assumptions:
// The cast node and its sources (via GT_LONG) must have been assigned registers.
// The destination cannot be a floating point type or a small integer type.
//
void CodeGen::genLongToIntCast(GenTree* cast)
{
assert(cast->OperGet() == GT_CAST);
GenTree* src = cast->gtGetOp1();
noway_assert(src->OperGet() == GT_LONG);
genConsumeRegs(src);
var_types srcType = ((cast->gtFlags & GTF_UNSIGNED) != 0) ? TYP_ULONG : TYP_LONG;
var_types dstType = cast->CastToType();
regNumber loSrcReg = src->gtGetOp1()->GetRegNum();
regNumber hiSrcReg = src->gtGetOp2()->GetRegNum();
regNumber dstReg = cast->GetRegNum();
assert((dstType == TYP_INT) || (dstType == TYP_UINT));
assert(genIsValidIntReg(loSrcReg));
assert(genIsValidIntReg(hiSrcReg));
assert(genIsValidIntReg(dstReg));
if (cast->gtOverflow())
{
//
// Generate an overflow check for [u]long to [u]int casts:
//
// long -> int - check if the upper 33 bits are all 0 or all 1
//
// ulong -> int - check if the upper 33 bits are all 0
//
// long -> uint - check if the upper 32 bits are all 0
// ulong -> uint - check if the upper 32 bits are all 0
//
if ((srcType == TYP_LONG) && (dstType == TYP_INT))
{
BasicBlock* allOne = genCreateTempLabel();
BasicBlock* success = genCreateTempLabel();
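            // The value fits in an int32 iff the upper 32 bits replicate the sign bit of the lower 32:
            // all ones when the low half is negative, all zeros otherwise.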
inst_RV_RV(INS_test, loSrcReg, loSrcReg, TYP_INT, EA_4BYTE);
inst_JMP(EJ_js, allOne);
inst_RV_RV(INS_test, hiSrcReg, hiSrcReg, TYP_INT, EA_4BYTE);
genJumpToThrowHlpBlk(EJ_jne, SCK_OVERFLOW);
inst_JMP(EJ_jmp, success);
genDefineTempLabel(allOne);
inst_RV_IV(INS_cmp, hiSrcReg, -1, EA_4BYTE);
genJumpToThrowHlpBlk(EJ_jne, SCK_OVERFLOW);
genDefineTempLabel(success);
}
else
{
if ((srcType == TYP_ULONG) && (dstType == TYP_INT))
{
inst_RV_RV(INS_test, loSrcReg, loSrcReg, TYP_INT, EA_4BYTE);
genJumpToThrowHlpBlk(EJ_js, SCK_OVERFLOW);
}
inst_RV_RV(INS_test, hiSrcReg, hiSrcReg, TYP_INT, EA_4BYTE);
genJumpToThrowHlpBlk(EJ_jne, SCK_OVERFLOW);
}
}
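    // The narrowing itself simply takes the low 32 bits of the source.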
inst_Mov(TYP_INT, dstReg, loSrcReg, /* canSkip */ true);
genProduceReg(cast);
}
#endif
//------------------------------------------------------------------------
// genIntCastOverflowCheck: Generate overflow checking code for an integer cast.
//
// Arguments:
// cast - The GT_CAST node
// desc - The cast description
// reg - The register containing the value to check
//
void CodeGen::genIntCastOverflowCheck(GenTreeCast* cast, const GenIntCastDesc& desc, regNumber reg)
{
switch (desc.CheckKind())
{
case GenIntCastDesc::CHECK_POSITIVE:
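            // 'test reg, reg' sets SF for negative values, so jump to the overflow block if the value is negative.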
GetEmitter()->emitIns_R_R(INS_test, EA_SIZE(desc.CheckSrcSize()), reg, reg);
genJumpToThrowHlpBlk(EJ_jl, SCK_OVERFLOW);
break;
#ifdef TARGET_64BIT
case GenIntCastDesc::CHECK_UINT_RANGE:
{
// We need to check if the value is not greater than 0xFFFFFFFF but this value
// cannot be encoded in an immediate operand. Use a right shift to test if the
// upper 32 bits are zero. This requires a temporary register.
const regNumber tempReg = cast->GetSingleTempReg();
assert(tempReg != reg);
GetEmitter()->emitIns_Mov(INS_mov, EA_8BYTE, tempReg, reg, /* canSkip */ false);
GetEmitter()->emitIns_R_I(INS_shr_N, EA_8BYTE, tempReg, 32);
genJumpToThrowHlpBlk(EJ_jne, SCK_OVERFLOW);
}
break;
case GenIntCastDesc::CHECK_POSITIVE_INT_RANGE:
GetEmitter()->emitIns_R_I(INS_cmp, EA_8BYTE, reg, INT32_MAX);
genJumpToThrowHlpBlk(EJ_ja, SCK_OVERFLOW);
break;
case GenIntCastDesc::CHECK_INT_RANGE:
GetEmitter()->emitIns_R_I(INS_cmp, EA_8BYTE, reg, INT32_MAX);
genJumpToThrowHlpBlk(EJ_jg, SCK_OVERFLOW);
GetEmitter()->emitIns_R_I(INS_cmp, EA_8BYTE, reg, INT32_MIN);
genJumpToThrowHlpBlk(EJ_jl, SCK_OVERFLOW);
break;
#endif
default:
{
assert(desc.CheckKind() == GenIntCastDesc::CHECK_SMALL_INT_RANGE);
const int castMaxValue = desc.CheckSmallIntMax();
const int castMinValue = desc.CheckSmallIntMin();
GetEmitter()->emitIns_R_I(INS_cmp, EA_SIZE(desc.CheckSrcSize()), reg, castMaxValue);
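            // When the minimum is 0, the unsigned 'ja' below also rejects negative inputs (they compare as
            // large unsigned values), so no separate lower bound check is needed.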
genJumpToThrowHlpBlk((castMinValue == 0) ? EJ_ja : EJ_jg, SCK_OVERFLOW);
if (castMinValue != 0)
{
GetEmitter()->emitIns_R_I(INS_cmp, EA_SIZE(desc.CheckSrcSize()), reg, castMinValue);
genJumpToThrowHlpBlk(EJ_jl, SCK_OVERFLOW);
}
}
break;
}
}
//------------------------------------------------------------------------
// genIntToIntCast: Generate code for an integer cast, with or without overflow check.
//
// Arguments:
// cast - The GT_CAST node
//
// Assumptions:
// The cast node is not a contained node and must have an assigned register.
// Neither the source nor target type can be a floating point type.
// On x86 casts to (U)BYTE require that the source be in a byte register.
//
// TODO-XArch-CQ: Allow castOp to be a contained node without an assigned register.
//
void CodeGen::genIntToIntCast(GenTreeCast* cast)
{
genConsumeRegs(cast->gtGetOp1());
const regNumber srcReg = cast->gtGetOp1()->GetRegNum();
const regNumber dstReg = cast->GetRegNum();
emitter* emit = GetEmitter();
assert(genIsValidIntReg(srcReg));
assert(genIsValidIntReg(dstReg));
GenIntCastDesc desc(cast);
if (desc.CheckKind() != GenIntCastDesc::CHECK_NONE)
{
genIntCastOverflowCheck(cast, desc, srcReg);
}
instruction ins;
unsigned insSize;
bool canSkip = false;
switch (desc.ExtendKind())
{
case GenIntCastDesc::ZERO_EXTEND_SMALL_INT:
ins = INS_movzx;
insSize = desc.ExtendSrcSize();
break;
case GenIntCastDesc::SIGN_EXTEND_SMALL_INT:
ins = INS_movsx;
insSize = desc.ExtendSrcSize();
break;
#ifdef TARGET_64BIT
case GenIntCastDesc::ZERO_EXTEND_INT:
ins = INS_mov;
insSize = 4;
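            // A 4-byte mov implicitly zero extends; when optimizing, let the emitter elide it if the upper
            // 32 bits of the source register are already known to be zero.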
canSkip = compiler->opts.OptimizationEnabled() && emit->AreUpper32BitsZero(srcReg);
break;
case GenIntCastDesc::SIGN_EXTEND_INT:
ins = INS_movsxd;
insSize = 4;
break;
#endif
default:
assert(desc.ExtendKind() == GenIntCastDesc::COPY);
ins = INS_mov;
insSize = desc.ExtendSrcSize();
canSkip = true;
break;
}
emit->emitIns_Mov(ins, EA_ATTR(insSize), dstReg, srcReg, canSkip);
genProduceReg(cast);
}
//------------------------------------------------------------------------
// genFloatToFloatCast: Generate code for a cast between float and double
//
// Arguments:
// treeNode - The GT_CAST node
//
// Return Value:
// None.
//
// Assumptions:
// Cast is a non-overflow conversion.
// The treeNode must have an assigned register.
// The cast is between float and double or vice versa.
//
void CodeGen::genFloatToFloatCast(GenTree* treeNode)
{
// float <--> double conversions are always non-overflow ones
assert(treeNode->OperGet() == GT_CAST);
assert(!treeNode->gtOverflow());
regNumber targetReg = treeNode->GetRegNum();
assert(genIsValidFloatReg(targetReg));
GenTree* op1 = treeNode->AsOp()->gtOp1;
#ifdef DEBUG
// If not contained, must be a valid float reg.
if (op1->isUsedFromReg())
{
assert(genIsValidFloatReg(op1->GetRegNum()));
}
#endif
var_types dstType = treeNode->CastToType();
var_types srcType = op1->TypeGet();
assert(varTypeIsFloating(srcType) && varTypeIsFloating(dstType));
genConsumeOperands(treeNode->AsOp());
if (srcType == dstType && (op1->isUsedFromReg() && (targetReg == op1->GetRegNum())))
{
        // source and destination types are the same and the value already resides in the
        // target register, so we just need to consume and produce the reg in this case.
;
}
else
{
instruction ins = ins_FloatConv(dstType, srcType);
GetEmitter()->emitInsBinary(ins, emitTypeSize(dstType), treeNode, op1);
}
genProduceReg(treeNode);
}
//------------------------------------------------------------------------
// genIntToFloatCast: Generate code to cast an int/long to float/double
//
// Arguments:
// treeNode - The GT_CAST node
//
// Return Value:
// None.
//
// Assumptions:
// Cast is a non-overflow conversion.
// The treeNode must have an assigned register.
// SrcType = int32/uint32/int64/uint64 and DstType = float/double.
//
void CodeGen::genIntToFloatCast(GenTree* treeNode)
{
// int type --> float/double conversions are always non-overflow ones
assert(treeNode->OperGet() == GT_CAST);
assert(!treeNode->gtOverflow());
regNumber targetReg = treeNode->GetRegNum();
assert(genIsValidFloatReg(targetReg));
GenTree* op1 = treeNode->AsOp()->gtOp1;
#ifdef DEBUG
if (op1->isUsedFromReg())
{
assert(genIsValidIntReg(op1->GetRegNum()));
}
#endif
var_types dstType = treeNode->CastToType();
var_types srcType = op1->TypeGet();
assert(!varTypeIsFloating(srcType) && varTypeIsFloating(dstType));
#if !defined(TARGET_64BIT)
// We expect morph to replace long to float/double casts with helper calls
noway_assert(!varTypeIsLong(srcType));
#endif // !defined(TARGET_64BIT)
    // Since the xarch emitter doesn't handle reporting GC info correctly while casting away GC-ness,
    // we ensure the srcType of a cast is a non-GC type. Codegen should never see BYREF as a source
    // type except for GT_LCL_VAR_ADDR and GT_LCL_FLD_ADDR, which represent stack addresses and can be
    // considered TYP_I_IMPL. In all other cases, where the src operand is a GC type and not known to
    // be on the stack, the front-end (see fgMorphCast()) ensures this by assigning the GC-typed local
    // to a non-GC-typed temp and using the temp as the operand of the cast operation.
if (srcType == TYP_BYREF)
{
noway_assert(op1->OperGet() == GT_LCL_VAR_ADDR || op1->OperGet() == GT_LCL_FLD_ADDR);
srcType = TYP_I_IMPL;
}
// force the srcType to unsigned if GT_UNSIGNED flag is set
if (treeNode->gtFlags & GTF_UNSIGNED)
{
srcType = varTypeToUnsigned(srcType);
}
noway_assert(!varTypeIsGC(srcType));
    // We should never see a srcType whose size is neither sizeof(int) nor sizeof(long).
    // For conversions from byte/sbyte/int16/uint16 to float/double, we expect either the
    // front-end or the lowering phase to have generated two levels of cast: the first
    // widens the smaller int type to int32 and the second converts to float/double.
emitAttr srcSize = EA_ATTR(genTypeSize(srcType));
noway_assert((srcSize == EA_ATTR(genTypeSize(TYP_INT))) || (srcSize == EA_ATTR(genTypeSize(TYP_LONG))));
// Also we don't expect to see uint32 -> float/double and uint64 -> float conversions
    // here since they should have been lowered appropriately.
noway_assert(srcType != TYP_UINT);
noway_assert((srcType != TYP_ULONG) || (dstType != TYP_FLOAT));
// To convert int to a float/double, cvtsi2ss/sd SSE2 instruction is used
// which does a partial write to lower 4/8 bytes of xmm register keeping the other
// upper bytes unmodified. If "cvtsi2ss/sd xmmReg, r32/r64" occurs inside a loop,
// the partial write could introduce a false dependency and could cause a stall
// if there are further uses of xmmReg. We have such a case occurring with a
// customer reported version of SpectralNorm benchmark, resulting in 2x perf
// regression. To avoid false dependency, we emit "xorps xmmReg, xmmReg" before
// cvtsi2ss/sd instruction.
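    // A minimal sketch of what is emitted below for an int64 -> double cast
    // (register names illustrative):
    //    xorps    xmmDst, xmmDst     ; break the false dependency on xmmDst
    //    cvtsi2sd xmmDst, rSrc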
genConsumeOperands(treeNode->AsOp());
GetEmitter()->emitIns_R_R(INS_xorps, EA_4BYTE, treeNode->GetRegNum(), treeNode->GetRegNum());
    // Note that here we need to specify srcType, since it determines
    // the size of the source reg/mem operand and the rex.w prefix.
instruction ins = ins_FloatConv(dstType, TYP_INT);
GetEmitter()->emitInsBinary(ins, emitTypeSize(srcType), treeNode, op1);
// Handle the case of srcType = TYP_ULONG. SSE2 conversion instruction
// will interpret ULONG value as LONG. Hence we need to adjust the
// result if sign-bit of srcType is set.
if (srcType == TYP_ULONG)
{
// The instruction sequence below is less accurate than what clang
// and gcc generate. However, we keep the current sequence for backward compatibility.
        // If we change the instructions below, FloatingPointUtils::convertUInt64ToDouble
// should be also updated for consistent conversion result.
assert(dstType == TYP_DOUBLE);
assert(op1->isUsedFromReg());
// Set the flags without modifying op1.
// test op1Reg, op1Reg
inst_RV_RV(INS_test, op1->GetRegNum(), op1->GetRegNum(), srcType);
// No need to adjust result if op1 >= 0 i.e. positive
// Jge label
BasicBlock* label = genCreateTempLabel();
inst_JMP(EJ_jge, label);
// Adjust the result
// result = result + 0x43f00000 00000000
// addsd resultReg, 0x43f00000 00000000
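        // (0x43f0000000000000 is the IEEE-754 double encoding of 2^64; adding it compensates for
        // the ULONG value having been interpreted as a negative signed long by the conversion.)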
CORINFO_FIELD_HANDLE* cns = &u8ToDblBitmask;
if (*cns == nullptr)
{
double d;
static_assert_no_msg(sizeof(double) == sizeof(__int64));
*((__int64*)&d) = 0x43f0000000000000LL;
*cns = GetEmitter()->emitFltOrDblConst(d, EA_8BYTE);
}
GetEmitter()->emitIns_R_C(INS_addsd, EA_8BYTE, treeNode->GetRegNum(), *cns, 0);
genDefineTempLabel(label);
}
genProduceReg(treeNode);
}
//------------------------------------------------------------------------
// genFloatToIntCast: Generate code to cast float/double to int/long
//
// Arguments:
// treeNode - The GT_CAST node
//
// Return Value:
// None.
//
// Assumptions:
// Cast is a non-overflow conversion.
// The treeNode must have an assigned register.
// SrcType = float/double and DstType = int32/uint32/int64/uint64
//
// TODO-XArch-CQ: (Low-pri) - generate in-line code when DstType = uint64
//
void CodeGen::genFloatToIntCast(GenTree* treeNode)
{
// we don't expect to see overflow detecting float/double --> int type conversions here
// as they should have been converted into helper calls by front-end.
assert(treeNode->OperGet() == GT_CAST);
assert(!treeNode->gtOverflow());
regNumber targetReg = treeNode->GetRegNum();
assert(genIsValidIntReg(targetReg));
GenTree* op1 = treeNode->AsOp()->gtOp1;
#ifdef DEBUG
if (op1->isUsedFromReg())
{
assert(genIsValidFloatReg(op1->GetRegNum()));
}
#endif
var_types dstType = treeNode->CastToType();
var_types srcType = op1->TypeGet();
assert(varTypeIsFloating(srcType) && !varTypeIsFloating(dstType));
    // We should never see a dstType whose size is neither sizeof(TYP_INT) nor sizeof(TYP_LONG).
// For conversions to byte/sbyte/int16/uint16 from float/double, we would expect the
// front-end or lowering phase to have generated two levels of cast. The first one is
// for float or double to int32/uint32 and the second one for narrowing int32/uint32 to
// the required smaller int type.
emitAttr dstSize = EA_ATTR(genTypeSize(dstType));
noway_assert((dstSize == EA_ATTR(genTypeSize(TYP_INT))) || (dstSize == EA_ATTR(genTypeSize(TYP_LONG))));
// We shouldn't be seeing uint64 here as it should have been converted
// into a helper call by either front-end or lowering phase.
noway_assert(!varTypeIsUnsigned(dstType) || (dstSize != EA_ATTR(genTypeSize(TYP_LONG))));
    // If the dstType is TYP_UINT, the result fits in 32 bits, but a 32-bit signed conversion
    // cannot represent values at or above 2^31. To cover the full unsigned range we pretend
    // we are converting to a long and use only the low 32 bits of the 64-bit result.
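    // (For example, a float -> uint32 cast is emitted as cvttss2si with a 64-bit destination
    // register; a sketch, the exact instruction comes from ins_FloatConv below.)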
if (varTypeIsUnsigned(dstType) && (dstSize == EA_ATTR(genTypeSize(TYP_INT))))
{
dstType = TYP_LONG;
}
    // Note that we need to specify dstType here, since it determines
    // the size of the destination integer register and the rex.w prefix.
genConsumeOperands(treeNode->AsOp());
instruction ins = ins_FloatConv(TYP_INT, srcType);
GetEmitter()->emitInsBinary(ins, emitTypeSize(dstType), treeNode, op1);
genProduceReg(treeNode);
}
//------------------------------------------------------------------------
// genCkfinite: Generate code for ckfinite opcode.
//
// Arguments:
// treeNode - The GT_CKFINITE node
//
// Return Value:
// None.
//
// Assumptions:
// GT_CKFINITE node has reserved an internal register.
//
// TODO-XArch-CQ - mark the operand as contained if known to be in
// memory (e.g. field or an array element).
//
void CodeGen::genCkfinite(GenTree* treeNode)
{
assert(treeNode->OperGet() == GT_CKFINITE);
GenTree* op1 = treeNode->AsOp()->gtOp1;
var_types targetType = treeNode->TypeGet();
int expMask = (targetType == TYP_FLOAT) ? 0x7F800000 : 0x7FF00000; // Bit mask to extract exponent.
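    // (The exponent field is all ones only for NaN and infinity: bits 30..23 of a float, or
    // bits 30..20 of the high 32 bits of a double.)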
regNumber targetReg = treeNode->GetRegNum();
// Extract exponent into a register.
regNumber tmpReg = treeNode->GetSingleTempReg();
genConsumeReg(op1);
#ifdef TARGET_64BIT
// Copy the floating-point value to an integer register. If we copied a float to a long, then
// right-shift the value so the high 32 bits of the floating-point value sit in the low 32
// bits of the integer register.
regNumber srcReg = op1->GetRegNum();
var_types targetIntType = ((targetType == TYP_FLOAT) ? TYP_INT : TYP_LONG);
inst_Mov(targetIntType, tmpReg, srcReg, /* canSkip */ false, emitActualTypeSize(targetType));
if (targetType == TYP_DOUBLE)
{
// right shift by 32 bits to get to exponent.
inst_RV_SH(INS_shr, EA_8BYTE, tmpReg, 32);
}
// Mask exponent with all 1's and check if the exponent is all 1's
inst_RV_IV(INS_and, tmpReg, expMask, EA_4BYTE);
inst_RV_IV(INS_cmp, tmpReg, expMask, EA_4BYTE);
// If exponent is all 1's, throw ArithmeticException
genJumpToThrowHlpBlk(EJ_je, SCK_ARITH_EXCPN);
// if it is a finite value copy it to targetReg
inst_Mov(targetType, targetReg, op1->GetRegNum(), /* canSkip */ true);
#else // !TARGET_64BIT
// If the target type is TYP_DOUBLE, we want to extract the high 32 bits into the register.
// There is no easy way to do this. To not require an extra register, we'll use shuffles
// to move the high 32 bits into the low 32 bits, then shuffle it back, since we
// need to produce the value into the target register.
//
// For TYP_DOUBLE, we'll generate (for targetReg != op1->GetRegNum()):
// movaps targetReg, op1->GetRegNum()
// shufps targetReg, targetReg, 0xB1 // WZYX => ZWXY
// mov_xmm2i tmpReg, targetReg // tmpReg <= Y
// and tmpReg, <mask>
// cmp tmpReg, <mask>
// je <throw block>
// movaps targetReg, op1->GetRegNum() // copy the value again, instead of un-shuffling it
//
// For TYP_DOUBLE with (targetReg == op1->GetRegNum()):
// shufps targetReg, targetReg, 0xB1 // WZYX => ZWXY
// mov_xmm2i tmpReg, targetReg // tmpReg <= Y
// and tmpReg, <mask>
// cmp tmpReg, <mask>
// je <throw block>
// shufps targetReg, targetReg, 0xB1 // ZWXY => WZYX
//
// For TYP_FLOAT, it's the same as TARGET_64BIT:
// mov_xmm2i tmpReg, targetReg // tmpReg <= low 32 bits
// and tmpReg, <mask>
// cmp tmpReg, <mask>
// je <throw block>
// movaps targetReg, op1->GetRegNum() // only if targetReg != op1->GetRegNum()
regNumber copyToTmpSrcReg; // The register we'll copy to the integer temp.
if (targetType == TYP_DOUBLE)
{
inst_Mov(targetType, targetReg, op1->GetRegNum(), /* canSkip */ true);
inst_RV_RV_IV(INS_shufps, EA_16BYTE, targetReg, targetReg, (int8_t)0xb1);
copyToTmpSrcReg = targetReg;
}
else
{
copyToTmpSrcReg = op1->GetRegNum();
}
// Copy only the low 32 bits. This will be the high order 32 bits of the floating-point
// value, no matter the floating-point type.
inst_Mov(TYP_INT, tmpReg, copyToTmpSrcReg, /* canSkip */ false, emitActualTypeSize(TYP_FLOAT));
// Mask exponent with all 1's and check if the exponent is all 1's
inst_RV_IV(INS_and, tmpReg, expMask, EA_4BYTE);
inst_RV_IV(INS_cmp, tmpReg, expMask, EA_4BYTE);
// If exponent is all 1's, throw ArithmeticException
genJumpToThrowHlpBlk(EJ_je, SCK_ARITH_EXCPN);
if ((targetType == TYP_DOUBLE) && (targetReg == op1->GetRegNum()))
{
// We need to re-shuffle the targetReg to get the correct result.
inst_RV_RV_IV(INS_shufps, EA_16BYTE, targetReg, targetReg, (int8_t)0xb1);
}
else
{
// In both the TYP_FLOAT and TYP_DOUBLE case, the op1 register is untouched,
// so copy it to the targetReg. This is faster and smaller for TYP_DOUBLE
// than re-shuffling the targetReg.
inst_Mov(targetType, targetReg, op1->GetRegNum(), /* canSkip */ true);
}
#endif // !TARGET_64BIT
genProduceReg(treeNode);
}
#ifdef TARGET_AMD64
int CodeGenInterface::genSPtoFPdelta() const
{
int delta;
#ifdef UNIX_AMD64_ABI
// We require frame chaining on Unix to support native tool unwinding (such as
// unwinding by the native debugger). We have a CLR-only extension to the
// unwind codes (UWOP_SET_FPREG_LARGE) to support SP->FP offsets larger than 240.
// If Unix ever supports EnC, the RSP == RBP assumption will have to be reevaluated.
delta = genTotalFrameSize();
#else // !UNIX_AMD64_ABI
// As per Amd64 ABI, RBP offset from initial RSP can be between 0 and 240 if
// RBP needs to be reported in unwind codes. This case would arise for methods
// with localloc.
if (compiler->compLocallocUsed)
{
// We cannot base delta computation on compLclFrameSize since it changes from
// tentative to final frame layout and hence there is a possibility of
// under-estimating offset of vars from FP, which in turn results in under-
// estimating instruction size.
//
// To be predictive and so as never to under-estimate offset of vars from FP
// we will always position FP at min(240, outgoing arg area size).
delta = Min(240, (int)compiler->lvaOutgoingArgSpaceSize);
}
else if (compiler->opts.compDbgEnC)
{
// vm assumption on EnC methods is that rsp and rbp are equal
delta = 0;
}
else
{
delta = genTotalFrameSize();
}
#endif // !UNIX_AMD64_ABI
return delta;
}
//---------------------------------------------------------------------
// genTotalFrameSize - return the total size of the stack frame, including local size,
// callee-saved register size, etc. For AMD64, this does not include the caller-pushed
// return address.
//
// Return value:
// Total frame size
//
int CodeGenInterface::genTotalFrameSize() const
{
assert(!IsUninitialized(compiler->compCalleeRegsPushed));
int totalFrameSize = compiler->compCalleeRegsPushed * REGSIZE_BYTES + compiler->compLclFrameSize;
assert(totalFrameSize >= 0);
return totalFrameSize;
}
//---------------------------------------------------------------------
// genCallerSPtoFPdelta - return the offset from Caller-SP to the frame pointer.
// This number is going to be negative, since the Caller-SP is at a higher
// address than the frame pointer.
//
// There must be a frame pointer to call this function!
//
// We can't compute this directly from the Caller-SP, since the frame pointer
// is based on a maximum delta from Initial-SP, so first we find SP, then
// compute the FP offset.
int CodeGenInterface::genCallerSPtoFPdelta() const
{
assert(isFramePointerUsed());
int callerSPtoFPdelta;
callerSPtoFPdelta = genCallerSPtoInitialSPdelta() + genSPtoFPdelta();
assert(callerSPtoFPdelta <= 0);
return callerSPtoFPdelta;
}
//---------------------------------------------------------------------
// genCallerSPtoInitialSPdelta - return the offset from Caller-SP to Initial SP.
//
// This number will be negative.
int CodeGenInterface::genCallerSPtoInitialSPdelta() const
{
int callerSPtoSPdelta = 0;
callerSPtoSPdelta -= genTotalFrameSize();
callerSPtoSPdelta -= REGSIZE_BYTES; // caller-pushed return address
// compCalleeRegsPushed does not account for the frame pointer
// TODO-Cleanup: shouldn't this be part of genTotalFrameSize?
if (isFramePointerUsed())
{
callerSPtoSPdelta -= REGSIZE_BYTES;
}
assert(callerSPtoSPdelta <= 0);
return callerSPtoSPdelta;
}
#endif // TARGET_AMD64
//-----------------------------------------------------------------------------------------
// genSSE2BitwiseOp - generate SSE2 code for the given oper as "Operand BitWiseOp BitMask"
//
// Arguments:
// treeNode - tree node
//
// Return value:
// None
//
// Assumptions:
// i) tree oper is one of GT_NEG or GT_INTRINSIC Abs()
// ii) tree type is floating point type.
// iii) caller of this routine needs to call genProduceReg()
void CodeGen::genSSE2BitwiseOp(GenTree* treeNode)
{
regNumber targetReg = treeNode->GetRegNum();
regNumber operandReg = genConsumeReg(treeNode->gtGetOp1());
emitAttr size = emitTypeSize(treeNode);
assert(varTypeIsFloating(treeNode->TypeGet()));
assert(treeNode->gtGetOp1()->isUsedFromReg());
CORINFO_FIELD_HANDLE* maskFld = nullptr;
UINT64 mask = 0;
instruction ins = INS_invalid;
if (treeNode->OperIs(GT_NEG))
{
// Neg(x) = flip the sign bit.
// Neg(f) = f ^ 0x80000000 x4 (packed)
// Neg(d) = d ^ 0x8000000000000000 x2 (packed)
ins = INS_xorps;
mask = treeNode->TypeIs(TYP_FLOAT) ? 0x8000000080000000UL : 0x8000000000000000UL;
maskFld = treeNode->TypeIs(TYP_FLOAT) ? &negBitmaskFlt : &negBitmaskDbl;
}
else if (treeNode->OperIs(GT_INTRINSIC))
{
assert(treeNode->AsIntrinsic()->gtIntrinsicName == NI_System_Math_Abs);
// Abs(x) = set sign-bit to zero
// Abs(f) = f & 0x7fffffff x4 (packed)
// Abs(d) = d & 0x7fffffffffffffff x2 (packed)
ins = INS_andps;
mask = treeNode->TypeIs(TYP_FLOAT) ? 0x7fffffff7fffffffUL : 0x7fffffffffffffffUL;
maskFld = treeNode->TypeIs(TYP_FLOAT) ? &absBitmaskFlt : &absBitmaskDbl;
}
else
{
assert(!"genSSE2BitwiseOp: unsupported oper");
}
if (*maskFld == nullptr)
{
UINT64 maskPack[] = {mask, mask};
*maskFld = GetEmitter()->emitBlkConst(&maskPack, 16, 16, treeNode->TypeGet());
}
GetEmitter()->emitIns_SIMD_R_R_C(ins, size, targetReg, operandReg, *maskFld, 0);
}
//-----------------------------------------------------------------------------------------
// genSSE41RoundOp - generate SSE41 code for the given tree as a round operation
//
// Arguments:
// treeNode - tree node
//
// Return value:
// None
//
// Assumptions:
// i) SSE4.1 is supported by the underlying hardware
// ii) treeNode oper is a GT_INTRINSIC
// iii) treeNode type is a floating point type
// iv) treeNode is not used from memory
// v) tree oper is NI_System_Math{F}_Round, _Ceiling, _Floor, or _Truncate
// vi) caller of this routine needs to call genProduceReg()
void CodeGen::genSSE41RoundOp(GenTreeOp* treeNode)
{
// i) SSE4.1 is supported by the underlying hardware
assert(compiler->compIsaSupportedDebugOnly(InstructionSet_SSE41));
// ii) treeNode oper is a GT_INTRINSIC
assert(treeNode->OperGet() == GT_INTRINSIC);
GenTree* srcNode = treeNode->gtGetOp1();
// iii) treeNode type is floating point type
assert(varTypeIsFloating(srcNode));
assert(srcNode->TypeGet() == treeNode->TypeGet());
// iv) treeNode is not used from memory
assert(!treeNode->isUsedFromMemory());
genConsumeOperands(treeNode);
instruction ins = (treeNode->TypeGet() == TYP_FLOAT) ? INS_roundss : INS_roundsd;
emitAttr size = emitTypeSize(treeNode);
regNumber dstReg = treeNode->GetRegNum();
unsigned ival = 0;
// v) tree oper is NI_System_Math{F}_Round, _Ceiling, _Floor, or _Truncate
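    // The immediate encodes the SSE4.1 rounding control: bits 1:0 select the rounding mode
    // (01 = toward -inf/floor, 10 = toward +inf/ceiling, 11 = truncate), bit 2 defers to the
    // MXCSR rounding mode (used for Math.Round), and bit 3 suppresses the precision exception.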
switch (treeNode->AsIntrinsic()->gtIntrinsicName)
{
case NI_System_Math_Round:
ival = 4;
break;
case NI_System_Math_Ceiling:
ival = 10;
break;
case NI_System_Math_Floor:
ival = 9;
break;
case NI_System_Math_Truncate:
ival = 11;
break;
default:
ins = INS_invalid;
assert(!"genSSE41RoundOp: unsupported intrinsic");
unreached();
}
if (srcNode->isContained() || srcNode->isUsedFromSpillTemp())
{
emitter* emit = GetEmitter();
TempDsc* tmpDsc = nullptr;
unsigned varNum = BAD_VAR_NUM;
unsigned offset = (unsigned)-1;
if (srcNode->isUsedFromSpillTemp())
{
assert(srcNode->IsRegOptional());
tmpDsc = getSpillTempDsc(srcNode);
varNum = tmpDsc->tdTempNum();
offset = 0;
regSet.tmpRlsTemp(tmpDsc);
}
else if (srcNode->isIndir())
{
GenTreeIndir* memIndir = srcNode->AsIndir();
GenTree* memBase = memIndir->gtOp1;
switch (memBase->OperGet())
{
case GT_LCL_VAR_ADDR:
case GT_LCL_FLD_ADDR:
{
assert(memBase->isContained());
varNum = memBase->AsLclVarCommon()->GetLclNum();
offset = memBase->AsLclVarCommon()->GetLclOffs();
// Ensure that all the GenTreeIndir values are set to their defaults.
assert(memBase->GetRegNum() == REG_NA);
assert(!memIndir->HasIndex());
assert(memIndir->Scale() == 1);
assert(memIndir->Offset() == 0);
break;
}
case GT_CLS_VAR_ADDR:
{
emit->emitIns_R_C_I(ins, size, dstReg, memBase->AsClsVar()->gtClsVarHnd, 0, ival);
return;
}
default:
{
emit->emitIns_R_A_I(ins, size, dstReg, memIndir, ival);
return;
}
}
}
else
{
switch (srcNode->OperGet())
{
case GT_CNS_DBL:
{
GenTreeDblCon* dblConst = srcNode->AsDblCon();
CORINFO_FIELD_HANDLE hnd = emit->emitFltOrDblConst(dblConst->gtDconVal, emitTypeSize(dblConst));
emit->emitIns_R_C_I(ins, size, dstReg, hnd, 0, ival);
return;
}
case GT_LCL_FLD:
varNum = srcNode->AsLclFld()->GetLclNum();
offset = srcNode->AsLclFld()->GetLclOffs();
break;
case GT_LCL_VAR:
{
assert(srcNode->IsRegOptional() || !compiler->lvaGetDesc(srcNode->AsLclVar())->lvIsRegCandidate());
varNum = srcNode->AsLclVar()->GetLclNum();
offset = 0;
break;
}
default:
unreached();
break;
}
}
// Ensure we got a good varNum and offset.
// We also need to check for `tmpDsc != nullptr` since spill temp numbers
// are negative and start with -1, which also happens to be BAD_VAR_NUM.
assert((varNum != BAD_VAR_NUM) || (tmpDsc != nullptr));
assert(offset != (unsigned)-1);
emit->emitIns_R_S_I(ins, size, dstReg, varNum, offset, ival);
}
else
{
inst_RV_RV_IV(ins, size, dstReg, srcNode->GetRegNum(), ival);
}
}
//---------------------------------------------------------------------
// genIntrinsic - generate code for a given intrinsic
//
// Arguments
// treeNode - the GT_INTRINSIC node
//
// Return value:
// None
//
void CodeGen::genIntrinsic(GenTree* treeNode)
{
// Handle intrinsics that can be implemented by target-specific instructions
switch (treeNode->AsIntrinsic()->gtIntrinsicName)
{
case NI_System_Math_Abs:
genSSE2BitwiseOp(treeNode);
break;
case NI_System_Math_Ceiling:
case NI_System_Math_Floor:
case NI_System_Math_Truncate:
case NI_System_Math_Round:
genSSE41RoundOp(treeNode->AsOp());
break;
case NI_System_Math_Sqrt:
{
// Both operand and its result must be of the same floating point type.
GenTree* srcNode = treeNode->AsOp()->gtOp1;
assert(varTypeIsFloating(srcNode));
assert(srcNode->TypeGet() == treeNode->TypeGet());
genConsumeOperands(treeNode->AsOp());
const instruction ins = (treeNode->TypeGet() == TYP_FLOAT) ? INS_sqrtss : INS_sqrtsd;
GetEmitter()->emitInsBinary(ins, emitTypeSize(treeNode), treeNode, srcNode);
break;
}
default:
assert(!"genIntrinsic: Unsupported intrinsic");
unreached();
}
genProduceReg(treeNode);
}
//-------------------------------------------------------------------------- //
// getBaseVarForPutArgStk - returns the baseVarNum for passing a stack arg.
//
// Arguments
// treeNode - the GT_PUTARG_STK node
//
// Return value:
// The number of the base variable.
//
// Note:
// If tail call the outgoing args are placed in the caller's incoming arg stack space.
// Otherwise, they go in the outgoing arg area on the current frame.
//
// On Windows the caller always creates slots (homing space) in its frame for the
// first 4 arguments of a callee (register passed args). So, the baseVarNum is always 0.
// For System V systems there is no such calling convention requirement, and the code needs to find
// the first stack passed argument from the caller. This is done by iterating over
// all the lvParam variables and finding the first whose GetArgReg() equals REG_STK.
//
unsigned CodeGen::getBaseVarForPutArgStk(GenTree* treeNode)
{
assert(treeNode->OperGet() == GT_PUTARG_STK);
unsigned baseVarNum;
    // Do we set up the stack arg in the incoming or the outgoing arg area?
    // Fast tail calls are implemented as epilog+jmp, so the stack arg is set up in the incoming arg area.
    // For all other calls the stack arg is set up in the outgoing arg area.
if (treeNode->AsPutArgStk()->putInIncomingArgArea())
{
// See the note in the function header re: finding the first stack passed argument.
baseVarNum = getFirstArgWithStackSlot();
assert(baseVarNum != BAD_VAR_NUM);
#ifdef DEBUG
// This must be a fast tail call.
assert(treeNode->AsPutArgStk()->gtCall->AsCall()->IsFastTailCall());
        // Since it is a fast tail call, the existence of the first incoming arg is guaranteed
        // because a fast tail call requires that the caller's incoming arg area is >= the
        // outgoing arg area required for the tail call.
LclVarDsc* varDsc = compiler->lvaGetDesc(baseVarNum);
assert(varDsc != nullptr);
#ifdef UNIX_AMD64_ABI
assert(!varDsc->lvIsRegArg && varDsc->GetArgReg() == REG_STK);
#else // !UNIX_AMD64_ABI
// On Windows this assert is always true. The first argument will always be in REG_ARG_0 or REG_FLTARG_0.
assert(varDsc->lvIsRegArg && (varDsc->GetArgReg() == REG_ARG_0 || varDsc->GetArgReg() == REG_FLTARG_0));
#endif // !UNIX_AMD64_ABI
#endif // DEBUG
}
else
{
#if FEATURE_FIXED_OUT_ARGS
baseVarNum = compiler->lvaOutgoingArgSpaceVar;
#else // !FEATURE_FIXED_OUT_ARGS
assert(!"No BaseVarForPutArgStk on x86");
baseVarNum = BAD_VAR_NUM;
#endif // !FEATURE_FIXED_OUT_ARGS
}
return baseVarNum;
}
//---------------------------------------------------------------------
// genAlignStackBeforeCall: Align the stack if necessary before a call.
//
// Arguments:
// putArgStk - the putArgStk node.
//
void CodeGen::genAlignStackBeforeCall(GenTreePutArgStk* putArgStk)
{
#if defined(UNIX_X86_ABI)
genAlignStackBeforeCall(putArgStk->gtCall);
#endif // UNIX_X86_ABI
}
//---------------------------------------------------------------------
// genAlignStackBeforeCall: Align the stack if necessary before a call.
//
// Arguments:
// call - the call node.
//
void CodeGen::genAlignStackBeforeCall(GenTreeCall* call)
{
#if defined(UNIX_X86_ABI)
// Have we aligned the stack yet?
if (!call->fgArgInfo->IsStkAlignmentDone())
{
// We haven't done any stack alignment yet for this call. We might need to create
// an alignment adjustment, even if this function itself doesn't have any stack args.
// This can happen if this function call is part of a nested call sequence, and the outer
// call has already pushed some arguments.
unsigned stkLevel = genStackLevel + call->fgArgInfo->GetStkSizeBytes();
call->fgArgInfo->ComputeStackAlignment(stkLevel);
unsigned padStkAlign = call->fgArgInfo->GetStkAlign();
if (padStkAlign != 0)
{
// Now generate the alignment
inst_RV_IV(INS_sub, REG_SPBASE, padStkAlign, EA_PTRSIZE);
AddStackLevel(padStkAlign);
AddNestedAlignment(padStkAlign);
}
call->fgArgInfo->SetStkAlignmentDone();
}
#endif // UNIX_X86_ABI
}
//---------------------------------------------------------------------
// genRemoveAlignmentAfterCall: After a call, remove the alignment
// added before the call, if any.
//
// Arguments:
// call - the call node.
// bias - additional stack adjustment
//
// Note:
// When bias > 0, caller should adjust stack level appropriately as
// bias is not considered when adjusting stack level.
//
void CodeGen::genRemoveAlignmentAfterCall(GenTreeCall* call, unsigned bias)
{
#if defined(TARGET_X86)
#if defined(UNIX_X86_ABI)
// Put back the stack pointer if there was any padding for stack alignment
unsigned padStkAlign = call->fgArgInfo->GetStkAlign();
unsigned padStkAdjust = padStkAlign + bias;
if (padStkAdjust != 0)
{
inst_RV_IV(INS_add, REG_SPBASE, padStkAdjust, EA_PTRSIZE);
SubtractStackLevel(padStkAlign);
SubtractNestedAlignment(padStkAlign);
}
#else // UNIX_X86_ABI
if (bias != 0)
{
if (bias == sizeof(int))
{
inst_RV(INS_pop, REG_ECX, TYP_INT);
}
else
{
inst_RV_IV(INS_add, REG_SPBASE, bias, EA_PTRSIZE);
}
}
#endif // !UNIX_X86_ABI
#else // TARGET_X86
assert(bias == 0);
#endif // !TARGET_X86
}
#ifdef TARGET_X86
//---------------------------------------------------------------------
// genAdjustStackForPutArgStk:
// adjust the stack pointer for a putArgStk node if necessary.
//
// Arguments:
// putArgStk - the putArgStk node.
//
// Returns: true if the stack pointer was adjusted; false otherwise.
//
// Notes:
// Sets `m_pushStkArg` to true if the stack arg needs to be pushed,
// false if the stack arg needs to be stored at the current stack
// pointer address. This is exactly the opposite of the return value
// of this function.
//
bool CodeGen::genAdjustStackForPutArgStk(GenTreePutArgStk* putArgStk)
{
const unsigned argSize = putArgStk->GetStackByteSize();
GenTree* source = putArgStk->gtGetOp1();
#ifdef FEATURE_SIMD
if (!source->OperIs(GT_FIELD_LIST) && varTypeIsSIMD(source))
{
inst_RV_IV(INS_sub, REG_SPBASE, argSize, EA_PTRSIZE);
AddStackLevel(argSize);
m_pushStkArg = false;
return true;
}
#endif // FEATURE_SIMD
#ifdef DEBUG
switch (putArgStk->gtPutArgStkKind)
{
case GenTreePutArgStk::Kind::RepInstr:
case GenTreePutArgStk::Kind::Unroll:
assert(!source->AsObj()->GetLayout()->HasGCPtr());
break;
case GenTreePutArgStk::Kind::Push:
case GenTreePutArgStk::Kind::PushAllSlots:
assert(source->OperIs(GT_FIELD_LIST) || source->AsObj()->GetLayout()->HasGCPtr() ||
(argSize < XMM_REGSIZE_BYTES));
break;
default:
unreached();
}
#endif // DEBUG
// In lowering (see "LowerPutArgStk") we have determined what sort of instructions
    // are going to be used for this node. If we will not be using pushes, the stack
    // needs to be adjusted first (such that SP points to the base of the outgoing arg).
//
if (!putArgStk->isPushKind())
{
// If argSize is large, we need to probe the stack like we do in the prolog (genAllocLclFrame)
// or for localloc (genLclHeap), to ensure we touch the stack pages sequentially, and don't miss
// the stack guard pages. The prolog probes, but we don't know at this point how much higher
        // the last probed stack pointer value is. We use a default threshold. Any size below this threshold
// we are guaranteed the stack has been probed. Above this threshold, we don't know. The threshold
// should be high enough to cover all common cases. Increasing the threshold means adding a few
// more "lowest address of stack" probes in the prolog. Since this is relatively rare, add it to
// stress modes.
if ((argSize >= ARG_STACK_PROBE_THRESHOLD_BYTES) ||
compiler->compStressCompile(Compiler::STRESS_GENERIC_VARN, 5))
{
genStackPointerConstantAdjustmentLoopWithProbe(-(ssize_t)argSize, REG_NA);
}
else
{
inst_RV_IV(INS_sub, REG_SPBASE, argSize, EA_PTRSIZE);
}
AddStackLevel(argSize);
m_pushStkArg = false;
return true;
}
// Otherwise, "push" will be adjusting the stack for us.
m_pushStkArg = true;
return false;
}
//---------------------------------------------------------------------
// genPutArgStkFieldList - generate code for passing a GT_FIELD_LIST arg on the stack.
//
// Arguments
// treeNode - the GT_PUTARG_STK node whose op1 is a GT_FIELD_LIST
//
// Return value:
// None
//
void CodeGen::genPutArgStkFieldList(GenTreePutArgStk* putArgStk)
{
GenTreeFieldList* const fieldList = putArgStk->gtOp1->AsFieldList();
assert(fieldList != nullptr);
// Set m_pushStkArg and pre-adjust the stack if necessary.
const bool preAdjustedStack = genAdjustStackForPutArgStk(putArgStk);
// For now, we only support the "push" case; we will push a full slot for the first field of each slot
// within the struct.
assert((putArgStk->isPushKind()) && !preAdjustedStack && m_pushStkArg);
// If we have pre-adjusted the stack and are simply storing the fields in order, set the offset to 0.
// (Note that this mode is not currently being used.)
// If we are pushing the arguments (i.e. we have not pre-adjusted the stack), then we are pushing them
// in reverse order, so we start with the current field offset at the size of the struct arg (which must be
// a multiple of the target pointer size).
unsigned currentOffset = (preAdjustedStack) ? 0 : putArgStk->GetStackByteSize();
unsigned prevFieldOffset = currentOffset;
regNumber intTmpReg = REG_NA;
regNumber simdTmpReg = REG_NA;
if (putArgStk->AvailableTempRegCount() != 0)
{
regMaskTP rsvdRegs = putArgStk->gtRsvdRegs;
if ((rsvdRegs & RBM_ALLINT) != 0)
{
intTmpReg = putArgStk->GetSingleTempReg(RBM_ALLINT);
assert(genIsValidIntReg(intTmpReg));
}
if ((rsvdRegs & RBM_ALLFLOAT) != 0)
{
simdTmpReg = putArgStk->GetSingleTempReg(RBM_ALLFLOAT);
assert(genIsValidFloatReg(simdTmpReg));
}
assert(genCountBits(rsvdRegs) == (unsigned)((intTmpReg == REG_NA) ? 0 : 1) + ((simdTmpReg == REG_NA) ? 0 : 1));
}
for (GenTreeFieldList::Use& use : fieldList->Uses())
{
GenTree* const fieldNode = use.GetNode();
const unsigned fieldOffset = use.GetOffset();
var_types fieldType = use.GetType();
// Long-typed nodes should have been handled by the decomposition pass, and lowering should have sorted the
// field list in descending order by offset.
assert(!varTypeIsLong(fieldType));
assert(fieldOffset <= prevFieldOffset);
// Consume the register, if any, for this field. Note that genConsumeRegs() will appropriately
// update the liveness info for a lclVar that has been marked RegOptional, which hasn't been
// assigned a register, and which is therefore contained.
// Unlike genConsumeReg(), it handles the case where no registers are being consumed.
genConsumeRegs(fieldNode);
regNumber argReg = fieldNode->isUsedFromSpillTemp() ? REG_NA : fieldNode->GetRegNum();
// If the field is slot-like, we can use a push instruction to store the entire register no matter the type.
//
// The GC encoder requires that the stack remain 4-byte aligned at all times. Round the adjustment up
// to the next multiple of 4. If we are going to generate a `push` instruction, the adjustment must
// not require rounding.
// NOTE: if the field is of GC type, we must use a push instruction, since the emitter is not otherwise
// able to detect stores into the outgoing argument area of the stack on x86.
const bool fieldIsSlot = ((fieldOffset % 4) == 0) && ((prevFieldOffset - fieldOffset) >= 4);
int adjustment = roundUp(currentOffset - fieldOffset, 4);
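        // Illustrative example: with currentOffset == 12 and fieldOffset == 6 the field is not
        // slot-aligned, so adjustment == roundUp(6, 4) == 8 and the store path below is used
        // rather than a push.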
if (fieldIsSlot && !varTypeIsSIMD(fieldType))
{
fieldType = genActualType(fieldType);
unsigned pushSize = genTypeSize(fieldType);
assert((pushSize % 4) == 0);
adjustment -= pushSize;
while (adjustment != 0)
{
inst_IV(INS_push, 0);
currentOffset -= pushSize;
AddStackLevel(pushSize);
adjustment -= pushSize;
}
m_pushStkArg = true;
}
else
{
m_pushStkArg = false;
// We always "push" floating point fields (i.e. they are full slot values that don't
// require special handling).
assert(varTypeIsIntegralOrI(fieldNode) || varTypeIsSIMD(fieldNode));
// If we can't push this field, it needs to be in a register so that we can store
// it to the stack location.
if (adjustment != 0)
{
// This moves the stack pointer to fieldOffset.
// For this case, we must adjust the stack and generate stack-relative stores rather than pushes.
// Adjust the stack pointer to the next slot boundary.
inst_RV_IV(INS_sub, REG_SPBASE, adjustment, EA_PTRSIZE);
currentOffset -= adjustment;
AddStackLevel(adjustment);
}
// Does it need to be in a byte register?
// If so, we'll use intTmpReg, which must have been allocated as a byte register.
// If it's already in a register, but not a byteable one, then move it.
if (varTypeIsByte(fieldType) && ((argReg == REG_NA) || ((genRegMask(argReg) & RBM_BYTE_REGS) == 0)))
{
assert(intTmpReg != REG_NA);
noway_assert((genRegMask(intTmpReg) & RBM_BYTE_REGS) != 0);
if (argReg != REG_NA)
{
inst_Mov(fieldType, intTmpReg, argReg, /* canSkip */ false);
argReg = intTmpReg;
}
}
}
if (argReg == REG_NA)
{
if (m_pushStkArg)
{
if (fieldNode->isUsedFromSpillTemp())
{
assert(!varTypeIsSIMD(fieldType)); // Q: can we get here with SIMD?
assert(fieldNode->IsRegOptional());
TempDsc* tmp = getSpillTempDsc(fieldNode);
GetEmitter()->emitIns_S(INS_push, emitActualTypeSize(fieldNode->TypeGet()), tmp->tdTempNum(), 0);
regSet.tmpRlsTemp(tmp);
}
else
{
assert(varTypeIsIntegralOrI(fieldNode));
switch (fieldNode->OperGet())
{
case GT_LCL_VAR:
inst_TT(INS_push, fieldNode, 0, 0, emitActualTypeSize(fieldNode->TypeGet()));
break;
case GT_CNS_INT:
if (fieldNode->IsIconHandle())
{
inst_IV_handle(INS_push, fieldNode->AsIntCon()->gtIconVal);
}
else
{
inst_IV(INS_push, fieldNode->AsIntCon()->gtIconVal);
}
break;
default:
unreached();
}
}
currentOffset -= TARGET_POINTER_SIZE;
AddStackLevel(TARGET_POINTER_SIZE);
}
else
{
// The stack has been adjusted and we will load the field to intTmpReg and then store it on the stack.
assert(varTypeIsIntegralOrI(fieldNode));
switch (fieldNode->OperGet())
{
case GT_LCL_VAR:
inst_RV_TT(INS_mov, intTmpReg, fieldNode);
break;
case GT_CNS_INT:
genSetRegToConst(intTmpReg, fieldNode->TypeGet(), fieldNode);
break;
default:
unreached();
}
genStoreRegToStackArg(fieldType, intTmpReg, fieldOffset - currentOffset);
}
}
else
{
#if defined(FEATURE_SIMD)
if (fieldType == TYP_SIMD12)
{
assert(genIsValidFloatReg(simdTmpReg));
genStoreSIMD12ToStack(argReg, simdTmpReg);
}
else
#endif // defined(FEATURE_SIMD)
{
genStoreRegToStackArg(fieldType, argReg, fieldOffset - currentOffset);
}
if (m_pushStkArg)
{
// We always push a slot-rounded size
currentOffset -= genTypeSize(fieldType);
}
}
prevFieldOffset = fieldOffset;
}
if (currentOffset != 0)
{
// We don't expect padding at the beginning of a struct, but it could happen with explicit layout.
inst_RV_IV(INS_sub, REG_SPBASE, currentOffset, EA_PTRSIZE);
AddStackLevel(currentOffset);
}
}
#endif // TARGET_X86
//---------------------------------------------------------------------
// genPutArgStk - generate code for passing an arg on the stack.
//
// Arguments
// treeNode - the GT_PUTARG_STK node
//
void CodeGen::genPutArgStk(GenTreePutArgStk* putArgStk)
{
GenTree* data = putArgStk->gtOp1;
var_types targetType = genActualType(data->TypeGet());
#ifdef TARGET_X86
genAlignStackBeforeCall(putArgStk);
if ((data->OperGet() != GT_FIELD_LIST) && varTypeIsStruct(targetType))
{
(void)genAdjustStackForPutArgStk(putArgStk);
genPutStructArgStk(putArgStk);
return;
}
// On a 32-bit target, all of the long arguments are handled with GT_FIELD_LISTs of TYP_INT.
assert(targetType != TYP_LONG);
const unsigned argSize = putArgStk->GetStackByteSize();
assert((argSize % TARGET_POINTER_SIZE) == 0);
if (data->isContainedIntOrIImmed())
{
if (data->IsIconHandle())
{
inst_IV_handle(INS_push, data->AsIntCon()->gtIconVal);
}
else
{
inst_IV(INS_push, data->AsIntCon()->gtIconVal);
}
AddStackLevel(argSize);
}
else if (data->OperGet() == GT_FIELD_LIST)
{
genPutArgStkFieldList(putArgStk);
}
else
{
// We should not see any contained nodes that are not immediates.
assert(data->isUsedFromReg());
genConsumeReg(data);
genPushReg(targetType, data->GetRegNum());
}
#else // !TARGET_X86
{
unsigned baseVarNum = getBaseVarForPutArgStk(putArgStk);
#ifdef UNIX_AMD64_ABI
if (data->OperIs(GT_FIELD_LIST))
{
genPutArgStkFieldList(putArgStk, baseVarNum);
return;
}
else if (varTypeIsStruct(targetType))
{
m_stkArgVarNum = baseVarNum;
m_stkArgOffset = putArgStk->getArgOffset();
genPutStructArgStk(putArgStk);
m_stkArgVarNum = BAD_VAR_NUM;
return;
}
#endif // UNIX_AMD64_ABI
noway_assert(targetType != TYP_STRUCT);
// Get argument offset on stack.
        // Here we cross-check that the argument offset hasn't changed from lowering to codegen,
        // since the arg slot number is stored in the GT_PUTARG_STK node during lowering.
unsigned argOffset = putArgStk->getArgOffset();
#ifdef DEBUG
fgArgTabEntry* curArgTabEntry = compiler->gtArgEntryByNode(putArgStk->gtCall, putArgStk);
assert(curArgTabEntry != nullptr);
assert(argOffset == curArgTabEntry->slotNum * TARGET_POINTER_SIZE);
#endif
if (data->isContainedIntOrIImmed())
{
GetEmitter()->emitIns_S_I(ins_Store(targetType), emitTypeSize(targetType), baseVarNum, argOffset,
(int)data->AsIntConCommon()->IconValue());
}
else
{
assert(data->isUsedFromReg());
genConsumeReg(data);
GetEmitter()->emitIns_S_R(ins_Store(targetType), emitTypeSize(targetType), data->GetRegNum(), baseVarNum,
argOffset);
}
}
#endif // !TARGET_X86
}
//---------------------------------------------------------------------
// genPutArgReg - generate code for a GT_PUTARG_REG node
//
// Arguments
// tree - the GT_PUTARG_REG node
//
// Return value:
// None
//
void CodeGen::genPutArgReg(GenTreeOp* tree)
{
assert(tree->OperIs(GT_PUTARG_REG));
var_types targetType = tree->TypeGet();
regNumber targetReg = tree->GetRegNum();
#ifndef UNIX_AMD64_ABI
assert(targetType != TYP_STRUCT);
#endif // !UNIX_AMD64_ABI
GenTree* op1 = tree->gtOp1;
genConsumeReg(op1);
// If child node is not already in the register we need, move it
inst_Mov(targetType, targetReg, op1->GetRegNum(), /* canSkip */ true);
genProduceReg(tree);
}
#ifdef TARGET_X86
// genPushReg: Push a register value onto the stack and adjust the stack level
//
// Arguments:
// type - the type of value to be stored
// reg - the register containing the value
//
// Notes:
// For TYP_LONG, the srcReg must be a floating point register.
// Otherwise, the register type must be consistent with the given type.
//
void CodeGen::genPushReg(var_types type, regNumber srcReg)
{
unsigned size = genTypeSize(type);
if (varTypeIsIntegralOrI(type) && type != TYP_LONG)
{
assert(genIsValidIntReg(srcReg));
inst_RV(INS_push, srcReg, type);
}
else
{
instruction ins;
emitAttr attr = emitTypeSize(type);
if (type == TYP_LONG)
{
// On x86, the only way we can push a TYP_LONG from a register is if it is in an xmm reg.
// This is only used when we are pushing a struct from memory to memory, and basically is
// handling an 8-byte "chunk", as opposed to strictly a long type.
ins = INS_movq;
}
else
{
ins = ins_Store(type);
}
assert(genIsValidFloatReg(srcReg));
inst_RV_IV(INS_sub, REG_SPBASE, size, EA_PTRSIZE);
GetEmitter()->emitIns_AR_R(ins, attr, srcReg, REG_SPBASE, 0);
}
AddStackLevel(size);
}
#endif // TARGET_X86
#if defined(FEATURE_PUT_STRUCT_ARG_STK)
// genStoreRegToStackArg: Store a register value into the stack argument area
//
// Arguments:
// type - the type of value to be stored
// reg - the register containing the value
// offset - the offset from the base (see Assumptions below)
//
// Notes:
// A type of TYP_STRUCT instructs this method to store a 16-byte chunk
// at the given offset (i.e. not the full struct).
//
// Assumptions:
// The caller must set the context appropriately before calling this method:
// - On x64, m_stkArgVarNum must be set according to whether this is a regular or tail call.
// - On x86, the caller must set m_pushStkArg if this method should push the argument.
// Otherwise, the argument is stored at the given offset from sp.
//
// TODO: In the below code the load and store instructions are for 16 bytes, but the
// type is EA_8BYTE. The movdqa/u are 16 byte instructions, so it works, but
// this probably needs to be changed.
//
void CodeGen::genStoreRegToStackArg(var_types type, regNumber srcReg, int offset)
{
assert(srcReg != REG_NA);
instruction ins;
emitAttr attr;
unsigned size;
if (type == TYP_STRUCT)
{
ins = INS_movdqu;
// This should be changed!
attr = EA_8BYTE;
size = 16;
}
else
{
#ifdef FEATURE_SIMD
if (varTypeIsSIMD(type))
{
assert(genIsValidFloatReg(srcReg));
ins = ins_Store(type); // TODO-CQ: pass 'aligned' correctly
}
else
#endif // FEATURE_SIMD
#ifdef TARGET_X86
if (type == TYP_LONG)
{
assert(genIsValidFloatReg(srcReg));
ins = INS_movq;
}
else
#endif // TARGET_X86
{
assert((varTypeUsesFloatReg(type) && genIsValidFloatReg(srcReg)) ||
(varTypeIsIntegralOrI(type) && genIsValidIntReg(srcReg)));
ins = ins_Store(type);
}
attr = emitTypeSize(type);
size = genTypeSize(type);
}
#ifdef TARGET_X86
if (m_pushStkArg)
{
genPushReg(type, srcReg);
}
else
{
GetEmitter()->emitIns_AR_R(ins, attr, srcReg, REG_SPBASE, offset);
}
#else // !TARGET_X86
assert(m_stkArgVarNum != BAD_VAR_NUM);
GetEmitter()->emitIns_S_R(ins, attr, srcReg, m_stkArgVarNum, m_stkArgOffset + offset);
#endif // !TARGET_X86
}
//---------------------------------------------------------------------
// genPutStructArgStk - generate code for copying a struct arg on the stack by value.
// In case there are references to heap objects in the struct,
// it generates the GC info as well.
//
// Arguments
// putArgStk - the GT_PUTARG_STK node
//
// Notes:
// In the case of fixed out args, the caller must have set m_stkArgVarNum to the variable number
// corresponding to the argument area (where we will put the argument on the stack).
// For tail calls this is the baseVarNum = 0.
// For non tail calls this is the outgoingArgSpace.
//
void CodeGen::genPutStructArgStk(GenTreePutArgStk* putArgStk)
{
GenTree* source = putArgStk->gtGetOp1();
var_types targetType = source->TypeGet();
#if defined(TARGET_X86) && defined(FEATURE_SIMD)
if (putArgStk->isSIMD12())
{
genPutArgStkSIMD12(putArgStk);
return;
}
#endif // defined(TARGET_X86) && defined(FEATURE_SIMD)
if (varTypeIsSIMD(targetType))
{
regNumber srcReg = genConsumeReg(source);
assert((srcReg != REG_NA) && (genIsValidFloatReg(srcReg)));
genStoreRegToStackArg(targetType, srcReg, 0);
return;
}
assert(targetType == TYP_STRUCT);
ClassLayout* layout = source->AsObj()->GetLayout();
switch (putArgStk->gtPutArgStkKind)
{
case GenTreePutArgStk::Kind::RepInstr:
genStructPutArgRepMovs(putArgStk);
break;
#ifndef TARGET_X86
case GenTreePutArgStk::Kind::PartialRepInstr:
genStructPutArgPartialRepMovs(putArgStk);
break;
#endif // !TARGET_X86
case GenTreePutArgStk::Kind::Unroll:
genStructPutArgUnroll(putArgStk);
break;
#ifdef TARGET_X86
case GenTreePutArgStk::Kind::Push:
genStructPutArgPush(putArgStk);
break;
#endif // TARGET_X86
default:
unreached();
}
}
#endif // defined(FEATURE_PUT_STRUCT_ARG_STK)
/*****************************************************************************
*
* Create and record GC Info for the function.
*/
#ifndef JIT32_GCENCODER
void
#else // !JIT32_GCENCODER
void*
#endif // !JIT32_GCENCODER
CodeGen::genCreateAndStoreGCInfo(unsigned codeSize, unsigned prologSize, unsigned epilogSize DEBUGARG(void* codePtr))
{
#ifdef JIT32_GCENCODER
return genCreateAndStoreGCInfoJIT32(codeSize, prologSize, epilogSize DEBUGARG(codePtr));
#else // !JIT32_GCENCODER
genCreateAndStoreGCInfoX64(codeSize, prologSize DEBUGARG(codePtr));
#endif // !JIT32_GCENCODER
}
#ifdef JIT32_GCENCODER
void* CodeGen::genCreateAndStoreGCInfoJIT32(unsigned codeSize,
unsigned prologSize,
unsigned epilogSize DEBUGARG(void* codePtr))
{
BYTE headerBuf[64];
InfoHdr header;
int s_cached;
#ifdef FEATURE_EH_FUNCLETS
// We should do this before gcInfoBlockHdrSave since varPtrTableSize must be finalized before it
if (compiler->ehAnyFunclets())
{
gcInfo.gcMarkFilterVarsPinned();
}
#endif
#ifdef DEBUG
size_t headerSize =
#endif
compiler->compInfoBlkSize =
gcInfo.gcInfoBlockHdrSave(headerBuf, 0, codeSize, prologSize, epilogSize, &header, &s_cached);
size_t argTabOffset = 0;
size_t ptrMapSize = gcInfo.gcPtrTableSize(header, codeSize, &argTabOffset);
#if DISPLAY_SIZES
if (GetInterruptible())
{
gcHeaderISize += compiler->compInfoBlkSize;
gcPtrMapISize += ptrMapSize;
}
else
{
gcHeaderNSize += compiler->compInfoBlkSize;
gcPtrMapNSize += ptrMapSize;
}
#endif // DISPLAY_SIZES
compiler->compInfoBlkSize += ptrMapSize;
/* Allocate the info block for the method */
compiler->compInfoBlkAddr = (BYTE*)compiler->info.compCompHnd->allocGCInfo(compiler->compInfoBlkSize);
#if 0 // VERBOSE_SIZES
// TODO-X86-Cleanup: 'dataSize', below, is not defined
// if (compiler->compInfoBlkSize > codeSize && compiler->compInfoBlkSize > 100)
{
printf("[%7u VM, %7u+%7u/%7u x86 %03u/%03u%%] %s.%s\n",
compiler->info.compILCodeSize,
compiler->compInfoBlkSize,
codeSize + dataSize,
codeSize + dataSize - prologSize - epilogSize,
100 * (codeSize + dataSize) / compiler->info.compILCodeSize,
100 * (codeSize + dataSize + compiler->compInfoBlkSize) / compiler->info.compILCodeSize,
compiler->info.compClassName,
compiler->info.compMethodName);
}
#endif
/* Fill in the info block and return it to the caller */
void* infoPtr = compiler->compInfoBlkAddr;
/* Create the method info block: header followed by GC tracking tables */
compiler->compInfoBlkAddr +=
gcInfo.gcInfoBlockHdrSave(compiler->compInfoBlkAddr, -1, codeSize, prologSize, epilogSize, &header, &s_cached);
assert(compiler->compInfoBlkAddr == (BYTE*)infoPtr + headerSize);
compiler->compInfoBlkAddr = gcInfo.gcPtrTableSave(compiler->compInfoBlkAddr, header, codeSize, &argTabOffset);
assert(compiler->compInfoBlkAddr == (BYTE*)infoPtr + headerSize + ptrMapSize);
#ifdef DEBUG
if (0)
{
BYTE* temp = (BYTE*)infoPtr;
size_t size = compiler->compInfoBlkAddr - temp;
BYTE* ptab = temp + headerSize;
noway_assert(size == headerSize + ptrMapSize);
printf("Method info block - header [%zu bytes]:", headerSize);
for (unsigned i = 0; i < size; i++)
{
if (temp == ptab)
{
printf("\nMethod info block - ptrtab [%u bytes]:", ptrMapSize);
printf("\n %04X: %*c", i & ~0xF, 3 * (i & 0xF), ' ');
}
else
{
if (!(i % 16))
printf("\n %04X: ", i);
}
printf("%02X ", *temp++);
}
printf("\n");
}
#endif // DEBUG
#if DUMP_GC_TABLES
if (compiler->opts.dspGCtbls)
{
const BYTE* base = (BYTE*)infoPtr;
size_t size;
unsigned methodSize;
InfoHdr dumpHeader;
printf("GC Info for method %s\n", compiler->info.compFullName);
printf("GC info size = %3u\n", compiler->compInfoBlkSize);
size = gcInfo.gcInfoBlockHdrDump(base, &dumpHeader, &methodSize);
// printf("size of header encoding is %3u\n", size);
printf("\n");
if (compiler->opts.dspGCtbls)
{
base += size;
size = gcInfo.gcDumpPtrTable(base, dumpHeader, methodSize);
// printf("size of pointer table is %3u\n", size);
printf("\n");
noway_assert(compiler->compInfoBlkAddr == (base + size));
}
}
#endif // DUMP_GC_TABLES
/* Make sure we ended up generating the expected number of bytes */
noway_assert(compiler->compInfoBlkAddr == (BYTE*)infoPtr + compiler->compInfoBlkSize);
return infoPtr;
}
#else // !JIT32_GCENCODER
void CodeGen::genCreateAndStoreGCInfoX64(unsigned codeSize, unsigned prologSize DEBUGARG(void* codePtr))
{
IAllocator* allowZeroAlloc = new (compiler, CMK_GC) CompIAllocator(compiler->getAllocatorGC());
GcInfoEncoder* gcInfoEncoder = new (compiler, CMK_GC)
GcInfoEncoder(compiler->info.compCompHnd, compiler->info.compMethodInfo, allowZeroAlloc, NOMEM);
assert(gcInfoEncoder);
// Follow the code pattern of the x86 gc info encoder (genCreateAndStoreGCInfoJIT32).
gcInfo.gcInfoBlockHdrSave(gcInfoEncoder, codeSize, prologSize);
// We keep the call count for the second call to gcMakeRegPtrTable() below.
unsigned callCnt = 0;
// First we figure out the encoder ID's for the stack slots and registers.
gcInfo.gcMakeRegPtrTable(gcInfoEncoder, codeSize, prologSize, GCInfo::MAKE_REG_PTR_MODE_ASSIGN_SLOTS, &callCnt);
// Now we've requested all the slots we'll need; "finalize" these (make more compact data structures for them).
gcInfoEncoder->FinalizeSlotIds();
// Now we can actually use those slot ID's to declare live ranges.
gcInfo.gcMakeRegPtrTable(gcInfoEncoder, codeSize, prologSize, GCInfo::MAKE_REG_PTR_MODE_DO_WORK, &callCnt);
if (compiler->opts.compDbgEnC)
{
// what we have to preserve is called the "frame header" (see comments in VM\eetwain.cpp)
// which is:
// -return address
// -saved off RBP
// -saved 'this' pointer and bool for synchronized methods
// 4 slots for RBP + return address + RSI + RDI
int preservedAreaSize = 4 * REGSIZE_BYTES;
if (compiler->info.compFlags & CORINFO_FLG_SYNCH)
{
if (!(compiler->info.compFlags & CORINFO_FLG_STATIC))
{
preservedAreaSize += REGSIZE_BYTES;
}
// bool in synchronized methods that tracks whether the lock has been taken (takes 4 bytes on stack)
preservedAreaSize += 4;
}
// Used to signal both that the method is compiled for EnC, and also the size of the block at the top of the
// frame
gcInfoEncoder->SetSizeOfEditAndContinuePreservedArea(preservedAreaSize);
}
if (compiler->opts.IsReversePInvoke())
{
unsigned reversePInvokeFrameVarNumber = compiler->lvaReversePInvokeFrameVar;
assert(reversePInvokeFrameVarNumber != BAD_VAR_NUM);
const LclVarDsc* reversePInvokeFrameVar = compiler->lvaGetDesc(reversePInvokeFrameVarNumber);
gcInfoEncoder->SetReversePInvokeFrameSlot(reversePInvokeFrameVar->GetStackOffset());
}
gcInfoEncoder->Build();
// GC Encoder automatically puts the GC info in the right spot using ICorJitInfo::allocGCInfo(size_t)
// let's save the values anyway for debugging purposes
compiler->compInfoBlkAddr = gcInfoEncoder->Emit();
compiler->compInfoBlkSize = 0; // not exposed by the GCEncoder interface
}
#endif // !JIT32_GCENCODER
/*****************************************************************************
* Emit a call to a helper function.
*
*/
void CodeGen::genEmitHelperCall(unsigned helper, int argSize, emitAttr retSize, regNumber callTargetReg)
{
void* addr = nullptr;
void* pAddr = nullptr;
emitter::EmitCallType callType = emitter::EC_FUNC_TOKEN;
addr = compiler->compGetHelperFtn((CorInfoHelpFunc)helper, &pAddr);
regNumber callTarget = REG_NA;
regMaskTP killMask = compiler->compHelperCallKillSet((CorInfoHelpFunc)helper);
if (!addr)
{
assert(pAddr != nullptr);
// Absolute indirect call addr
// Note: Order of checks is important. First always check for pc-relative and next
// zero-relative. Because the former encoding is 1-byte smaller than the latter.
if (genCodeIndirAddrCanBeEncodedAsPCRelOffset((size_t)pAddr) ||
genCodeIndirAddrCanBeEncodedAsZeroRelOffset((size_t)pAddr))
{
// generate call whose target is specified by 32-bit offset relative to PC or zero.
callType = emitter::EC_FUNC_TOKEN_INDIR;
addr = pAddr;
}
else
{
#ifdef TARGET_AMD64
// If this indirect address cannot be encoded as 32-bit offset relative to PC or Zero,
// load it into REG_HELPER_CALL_TARGET and use register indirect addressing mode to
// make the call.
// mov reg, addr
// call [reg]
if (callTargetReg == REG_NA)
{
// If a callTargetReg has not been explicitly provided, we will use REG_DEFAULT_HELPER_CALL_TARGET, but
// this is only a valid assumption if the helper call is known to kill REG_DEFAULT_HELPER_CALL_TARGET.
callTargetReg = REG_DEFAULT_HELPER_CALL_TARGET;
regMaskTP callTargetMask = genRegMask(callTargetReg);
noway_assert((callTargetMask & killMask) == callTargetMask);
}
else
{
                // The explicitly provided call target register may not be in the kill set for the
                // call, so verify that it does not overwrite any live variable.
regMaskTP callTargetMask = genRegMask(callTargetReg);
noway_assert((callTargetMask & regSet.GetMaskVars()) == RBM_NONE);
}
#endif
callTarget = callTargetReg;
instGen_Set_Reg_To_Imm(EA_HANDLE_CNS_RELOC, callTarget, (ssize_t)pAddr);
callType = emitter::EC_INDIR_ARD;
}
}
// clang-format off
GetEmitter()->emitIns_Call(callType,
compiler->eeFindHelper(helper),
INDEBUG_LDISASM_COMMA(nullptr) addr,
argSize,
retSize
MULTIREG_HAS_SECOND_GC_RET_ONLY_ARG(EA_UNKNOWN),
gcInfo.gcVarPtrSetCur,
gcInfo.gcRegGCrefSetCur,
gcInfo.gcRegByrefSetCur,
DebugInfo(),
callTarget, // ireg
REG_NA, 0, 0, // xreg, xmul, disp
false // isJump
);
// clang-format on
regSet.verifyRegistersUsed(killMask);
}
/*****************************************************************************
* Unit testing of the XArch emitter: generate a bunch of instructions into the prolog
* (it's as good a place as any), then use COMPlus_JitLateDisasm=* to see if the late
 * disassembler interprets the instructions the same way we do.
*/
// Uncomment "#define ALL_XARCH_EMITTER_UNIT_TESTS" to run all the unit tests here.
// After adding a unit test, and verifying it works, put it under this #ifdef, so we don't see it run every time.
//#define ALL_XARCH_EMITTER_UNIT_TESTS
#if defined(DEBUG) && defined(LATE_DISASM) && defined(TARGET_AMD64)
void CodeGen::genAmd64EmitterUnitTests()
{
if (!verbose)
{
return;
}
if (!compiler->opts.altJit)
{
// No point doing this in a "real" JIT.
return;
}
// Mark the "fake" instructions in the output.
printf("*************** In genAmd64EmitterUnitTests()\n");
// We use this:
// genDefineTempLabel(genCreateTempLabel());
// to create artificial labels to help separate groups of tests.
//
// Loads
//
CLANG_FORMAT_COMMENT_ANCHOR;
#ifdef ALL_XARCH_EMITTER_UNIT_TESTS
genDefineTempLabel(genCreateTempLabel());
// vhaddpd ymm0,ymm1,ymm2
GetEmitter()->emitIns_R_R_R(INS_haddpd, EA_32BYTE, REG_XMM0, REG_XMM1, REG_XMM2);
// vaddss xmm0,xmm1,xmm2
GetEmitter()->emitIns_R_R_R(INS_addss, EA_4BYTE, REG_XMM0, REG_XMM1, REG_XMM2);
// vaddsd xmm0,xmm1,xmm2
GetEmitter()->emitIns_R_R_R(INS_addsd, EA_8BYTE, REG_XMM0, REG_XMM1, REG_XMM2);
// vaddps xmm0,xmm1,xmm2
GetEmitter()->emitIns_R_R_R(INS_addps, EA_16BYTE, REG_XMM0, REG_XMM1, REG_XMM2);
// vaddps ymm0,ymm1,ymm2
GetEmitter()->emitIns_R_R_R(INS_addps, EA_32BYTE, REG_XMM0, REG_XMM1, REG_XMM2);
// vaddpd xmm0,xmm1,xmm2
GetEmitter()->emitIns_R_R_R(INS_addpd, EA_16BYTE, REG_XMM0, REG_XMM1, REG_XMM2);
// vaddpd ymm0,ymm1,ymm2
GetEmitter()->emitIns_R_R_R(INS_addpd, EA_32BYTE, REG_XMM0, REG_XMM1, REG_XMM2);
// vsubss xmm0,xmm1,xmm2
GetEmitter()->emitIns_R_R_R(INS_subss, EA_4BYTE, REG_XMM0, REG_XMM1, REG_XMM2);
// vsubsd xmm0,xmm1,xmm2
GetEmitter()->emitIns_R_R_R(INS_subsd, EA_8BYTE, REG_XMM0, REG_XMM1, REG_XMM2);
    // vsubps xmm0,xmm1,xmm2
GetEmitter()->emitIns_R_R_R(INS_subps, EA_16BYTE, REG_XMM0, REG_XMM1, REG_XMM2);
// vsubps ymm0,ymm1,ymm2
GetEmitter()->emitIns_R_R_R(INS_subps, EA_32BYTE, REG_XMM0, REG_XMM1, REG_XMM2);
// vsubpd xmm0,xmm1,xmm2
GetEmitter()->emitIns_R_R_R(INS_subpd, EA_16BYTE, REG_XMM0, REG_XMM1, REG_XMM2);
// vsubpd ymm0,ymm1,ymm2
GetEmitter()->emitIns_R_R_R(INS_subpd, EA_32BYTE, REG_XMM0, REG_XMM1, REG_XMM2);
// vmulss xmm0,xmm1,xmm2
GetEmitter()->emitIns_R_R_R(INS_mulss, EA_4BYTE, REG_XMM0, REG_XMM1, REG_XMM2);
// vmulsd xmm0,xmm1,xmm2
GetEmitter()->emitIns_R_R_R(INS_mulsd, EA_8BYTE, REG_XMM0, REG_XMM1, REG_XMM2);
// vmulps xmm0,xmm1,xmm2
GetEmitter()->emitIns_R_R_R(INS_mulps, EA_16BYTE, REG_XMM0, REG_XMM1, REG_XMM2);
// vmulpd xmm0,xmm1,xmm2
GetEmitter()->emitIns_R_R_R(INS_mulpd, EA_16BYTE, REG_XMM0, REG_XMM1, REG_XMM2);
// vmulps ymm0,ymm1,ymm2
GetEmitter()->emitIns_R_R_R(INS_mulps, EA_32BYTE, REG_XMM0, REG_XMM1, REG_XMM2);
// vmulpd ymm0,ymm1,ymm2
GetEmitter()->emitIns_R_R_R(INS_mulpd, EA_32BYTE, REG_XMM0, REG_XMM1, REG_XMM2);
// vandps xmm0,xmm1,xmm2
GetEmitter()->emitIns_R_R_R(INS_andps, EA_16BYTE, REG_XMM0, REG_XMM1, REG_XMM2);
// vandpd xmm0,xmm1,xmm2
GetEmitter()->emitIns_R_R_R(INS_andpd, EA_16BYTE, REG_XMM0, REG_XMM1, REG_XMM2);
// vandps ymm0,ymm1,ymm2
GetEmitter()->emitIns_R_R_R(INS_andps, EA_32BYTE, REG_XMM0, REG_XMM1, REG_XMM2);
// vandpd ymm0,ymm1,ymm2
GetEmitter()->emitIns_R_R_R(INS_andpd, EA_32BYTE, REG_XMM0, REG_XMM1, REG_XMM2);
// vorps xmm0,xmm1,xmm2
GetEmitter()->emitIns_R_R_R(INS_orps, EA_16BYTE, REG_XMM0, REG_XMM1, REG_XMM2);
// vorpd xmm0,xmm1,xmm2
GetEmitter()->emitIns_R_R_R(INS_orpd, EA_16BYTE, REG_XMM0, REG_XMM1, REG_XMM2);
// vorps ymm0,ymm1,ymm2
GetEmitter()->emitIns_R_R_R(INS_orps, EA_32BYTE, REG_XMM0, REG_XMM1, REG_XMM2);
// vorpd ymm0,ymm1,ymm2
GetEmitter()->emitIns_R_R_R(INS_orpd, EA_32BYTE, REG_XMM0, REG_XMM1, REG_XMM2);
// vdivss xmm0,xmm1,xmm2
GetEmitter()->emitIns_R_R_R(INS_divss, EA_4BYTE, REG_XMM0, REG_XMM1, REG_XMM2);
// vdivsd xmm0,xmm1,xmm2
GetEmitter()->emitIns_R_R_R(INS_divsd, EA_8BYTE, REG_XMM0, REG_XMM1, REG_XMM2);
// vdivss xmm0,xmm1,xmm2
GetEmitter()->emitIns_R_R_R(INS_divss, EA_4BYTE, REG_XMM0, REG_XMM1, REG_XMM2);
// vdivsd xmm0,xmm1,xmm2
GetEmitter()->emitIns_R_R_R(INS_divsd, EA_8BYTE, REG_XMM0, REG_XMM1, REG_XMM2);
    // vcvtss2sd xmm0,xmm1,xmm2
GetEmitter()->emitIns_R_R_R(INS_cvtss2sd, EA_4BYTE, REG_XMM0, REG_XMM1, REG_XMM2);
    // vcvtsd2ss xmm0,xmm1,xmm2
GetEmitter()->emitIns_R_R_R(INS_cvtsd2ss, EA_8BYTE, REG_XMM0, REG_XMM1, REG_XMM2);
#endif // ALL_XARCH_EMITTER_UNIT_TESTS
printf("*************** End of genAmd64EmitterUnitTests()\n");
}
#endif // defined(DEBUG) && defined(LATE_DISASM) && defined(TARGET_AMD64)
#ifdef PROFILING_SUPPORTED
#ifdef TARGET_X86
//-----------------------------------------------------------------------------------
// genProfilingEnterCallback: Generate the profiling function enter callback.
//
// Arguments:
// initReg - register to use as scratch register
// pInitRegZeroed - OUT parameter. This variable remains unchanged.
//
// Return Value:
// None
//
// Notes:
// The x86 profile enter helper has the following requirements (see ProfileEnterNaked in
// VM\i386\asmhelpers.asm for details):
// 1. The calling sequence for calling the helper is:
// push FunctionIDOrClientID
// call ProfileEnterHelper
// 2. The calling function has an EBP frame.
//    3. EBP points to the saved EBP, which is the first thing saved in the function. Thus,
// the following prolog is assumed:
//            push EBP
// mov EBP, ESP
// 4. All registers are preserved.
// 5. The helper pops the FunctionIDOrClientID argument from the stack.
//
void CodeGen::genProfilingEnterCallback(regNumber initReg, bool* pInitRegZeroed)
{
assert(compiler->compGeneratingProlog);
// Give profiler a chance to back out of hooking this method
if (!compiler->compIsProfilerHookNeeded())
{
return;
}
unsigned saveStackLvl2 = genStackLevel;
// Important note: when you change enter probe layout, you must also update SKIP_ENTER_PROF_CALLBACK()
// for x86 stack unwinding
#if defined(UNIX_X86_ABI)
// Manually align the stack to be 16-byte aligned. This is similar to CodeGen::genAlignStackBeforeCall()
GetEmitter()->emitIns_R_I(INS_sub, EA_4BYTE, REG_SPBASE, 0xC);
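    // The 0xC of padding plus the 4-byte profilerHandle push below totals 16 bytes, preserving
    // 16-byte alignment at the call site.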
#endif // UNIX_X86_ABI
// Push the profilerHandle
if (compiler->compProfilerMethHndIndirected)
{
GetEmitter()->emitIns_AR_R(INS_push, EA_PTR_DSP_RELOC, REG_NA, REG_NA, (ssize_t)compiler->compProfilerMethHnd);
}
else
{
inst_IV(INS_push, (size_t)compiler->compProfilerMethHnd);
}
// This will emit either
// "call ip-relative 32-bit offset" or
// "mov rax, helper addr; call rax"
genEmitHelperCall(CORINFO_HELP_PROF_FCN_ENTER,
0, // argSize. Again, we have to lie about it
EA_UNKNOWN); // retSize
    // Check that we have space for the push.
assert(compiler->fgGetPtrArgCntMax() >= 1);
#if defined(UNIX_X86_ABI)
// Restoring alignment manually. This is similar to CodeGen::genRemoveAlignmentAfterCall
GetEmitter()->emitIns_R_I(INS_add, EA_4BYTE, REG_SPBASE, 0x10);
#endif // UNIX_X86_ABI
/* Restore the stack level */
SetStackLevel(saveStackLvl2);
}
//-----------------------------------------------------------------------------------
// genProfilingLeaveCallback: Generate the profiling function leave or tailcall callback.
// Technically, this is not part of the epilog; it is called when we are generating code for a GT_RETURN node.
//
// Arguments:
// helper - which helper to call. Either CORINFO_HELP_PROF_FCN_LEAVE or CORINFO_HELP_PROF_FCN_TAILCALL
//
// Return Value:
// None
//
// Notes:
// The x86 profile leave/tailcall helper has the following requirements (see ProfileLeaveNaked and
// ProfileTailcallNaked in VM\i386\asmhelpers.asm for details):
// 1. The calling sequence for calling the helper is:
// push FunctionIDOrClientID
// call ProfileLeaveHelper or ProfileTailcallHelper
// 2. The calling function has an EBP frame.
//    3. EBP points to the saved EBP, which is the first thing saved in the function. Thus,
// the following prolog is assumed:
//            push EBP
// mov EBP, ESP
// 4. helper == CORINFO_HELP_PROF_FCN_LEAVE: All registers are preserved.
// helper == CORINFO_HELP_PROF_FCN_TAILCALL: Only argument registers are preserved.
// 5. The helper pops the FunctionIDOrClientID argument from the stack.
//
void CodeGen::genProfilingLeaveCallback(unsigned helper)
{
assert((helper == CORINFO_HELP_PROF_FCN_LEAVE) || (helper == CORINFO_HELP_PROF_FCN_TAILCALL));
// Only hook if profiler says it's okay.
if (!compiler->compIsProfilerHookNeeded())
{
return;
}
compiler->info.compProfilerCallback = true;
// Need to save on to the stack level, since the helper call will pop the argument
unsigned saveStackLvl2 = genStackLevel;
#if defined(UNIX_X86_ABI)
// Manually align the stack to be 16-byte aligned. This is similar to CodeGen::genAlignStackBeforeCall()
GetEmitter()->emitIns_R_I(INS_sub, EA_4BYTE, REG_SPBASE, 0xC);
AddStackLevel(0xC);
AddNestedAlignment(0xC);
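    // Together with the 4-byte profilerHandle push below, this 0xC adjustment keeps the call site
    // 16-byte aligned.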
#endif // UNIX_X86_ABI
//
// Push the profilerHandle
//
if (compiler->compProfilerMethHndIndirected)
{
GetEmitter()->emitIns_AR_R(INS_push, EA_PTR_DSP_RELOC, REG_NA, REG_NA, (ssize_t)compiler->compProfilerMethHnd);
}
else
{
inst_IV(INS_push, (size_t)compiler->compProfilerMethHnd);
}
genSinglePush();
#if defined(UNIX_X86_ABI)
int argSize = -REGSIZE_BYTES; // negative means caller-pop (cdecl)
#else
int argSize = REGSIZE_BYTES;
#endif
genEmitHelperCall(helper, argSize, EA_UNKNOWN /* retSize */);
    // Check that we have space for the push.
assert(compiler->fgGetPtrArgCntMax() >= 1);
#if defined(UNIX_X86_ABI)
// Restoring alignment manually. This is similar to CodeGen::genRemoveAlignmentAfterCall
GetEmitter()->emitIns_R_I(INS_add, EA_4BYTE, REG_SPBASE, 0x10);
SubtractStackLevel(0x10);
SubtractNestedAlignment(0xC);
#endif // UNIX_X86_ABI
/* Restore the stack level */
SetStackLevel(saveStackLvl2);
}
#endif // TARGET_X86
#ifdef TARGET_AMD64
//-----------------------------------------------------------------------------------
// genProfilingEnterCallback: Generate the profiling function enter callback.
//
// Arguments:
// initReg - register to use as scratch register
// pInitRegZeroed - OUT parameter. *pInitRegZeroed is set to 'false' if and only if
// this call sets 'initReg' to a non-zero value.
//
// Return Value:
// None
//
void CodeGen::genProfilingEnterCallback(regNumber initReg, bool* pInitRegZeroed)
{
assert(compiler->compGeneratingProlog);
// Give profiler a chance to back out of hooking this method
if (!compiler->compIsProfilerHookNeeded())
{
return;
}
#if !defined(UNIX_AMD64_ABI)
unsigned varNum;
LclVarDsc* varDsc;
// Since the method needs to make a profiler callback, it should have out-going arg space allocated.
noway_assert(compiler->lvaOutgoingArgSpaceVar != BAD_VAR_NUM);
noway_assert(compiler->lvaOutgoingArgSpaceSize >= (4 * REGSIZE_BYTES));
// Home all arguments passed in arg registers (RCX, RDX, R8 and R9).
// In case of vararg methods, arg regs are already homed.
//
    // Note: Here we don't need to worry about updating GC info, since the enter
    // callback is generated as part of the prolog, which is non-GC interruptible.
    // Moreover, GC cannot kick in while executing inside the profiler callback, which is a
    // profiler requirement so that it can examine arguments that could be obj refs.
if (!compiler->info.compIsVarArgs)
{
for (varNum = 0, varDsc = compiler->lvaTable; varNum < compiler->info.compArgsCount; varNum++, varDsc++)
{
noway_assert(varDsc->lvIsParam);
if (!varDsc->lvIsRegArg)
{
continue;
}
var_types storeType = varDsc->GetRegisterType();
regNumber argReg = varDsc->GetArgReg();
instruction store_ins = ins_Store(storeType);
#ifdef FEATURE_SIMD
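            // A TYP_SIMD8 argument that is homed in an integer register is stored with a plain
            // 8-byte integer mov.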
if ((storeType == TYP_SIMD8) && genIsValidIntReg(argReg))
{
store_ins = INS_mov;
}
#endif // FEATURE_SIMD
GetEmitter()->emitIns_S_R(store_ins, emitTypeSize(storeType), argReg, varNum, 0);
}
}
// Emit profiler EnterCallback(ProfilerMethHnd, caller's SP)
// RCX = ProfilerMethHnd
if (compiler->compProfilerMethHndIndirected)
{
// Profiler hooks enabled during Ngen time.
// Profiler handle needs to be accessed through an indirection of a pointer.
GetEmitter()->emitIns_R_AI(INS_mov, EA_PTR_DSP_RELOC, REG_ARG_0, (ssize_t)compiler->compProfilerMethHnd);
}
else
{
instGen_Set_Reg_To_Imm(EA_8BYTE, REG_ARG_0, (ssize_t)compiler->compProfilerMethHnd);
}
// RDX = caller's SP
// Notes
// 1) Here we can query caller's SP offset since prolog will be generated after final frame layout.
// 2) caller's SP relative offset to FramePointer will be negative. We need to add absolute value
// of that offset to FramePointer to obtain caller's SP value.
assert(compiler->lvaOutgoingArgSpaceVar != BAD_VAR_NUM);
int callerSPOffset = compiler->lvaToCallerSPRelativeOffset(0, isFramePointerUsed());
GetEmitter()->emitIns_R_AR(INS_lea, EA_PTRSIZE, REG_ARG_1, genFramePointerReg(), -callerSPOffset);
// This will emit either
// "call ip-relative 32-bit offset" or
// "mov rax, helper addr; call rax"
genEmitHelperCall(CORINFO_HELP_PROF_FCN_ENTER, 0, EA_UNKNOWN);
// TODO-AMD64-CQ: Rather than reloading, see if this could be optimized by combining with prolog
// generation logic that moves args around as required by first BB entry point conditions
// computed by LSRA. Code pointers for investigating this further: genFnPrologCalleeRegArgs()
// and genEnregisterIncomingStackArgs().
//
// Now reload arg registers from home locations.
// Vararg methods:
// - we need to reload only known (i.e. fixed) reg args.
// - if floating point type, also reload it into corresponding integer reg
for (varNum = 0, varDsc = compiler->lvaTable; varNum < compiler->info.compArgsCount; varNum++, varDsc++)
{
noway_assert(varDsc->lvIsParam);
if (!varDsc->lvIsRegArg)
{
continue;
}
var_types loadType = varDsc->GetRegisterType();
regNumber argReg = varDsc->GetArgReg();
instruction load_ins = ins_Load(loadType);
#ifdef FEATURE_SIMD
if ((loadType == TYP_SIMD8) && genIsValidIntReg(argReg))
{
load_ins = INS_mov;
}
#endif // FEATURE_SIMD
GetEmitter()->emitIns_R_S(load_ins, emitTypeSize(loadType), argReg, varNum, 0);
if (compFeatureVarArg() && compiler->info.compIsVarArgs && varTypeIsFloating(loadType))
{
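            // For varargs, a floating-point arg is also passed in the corresponding integer register,
            // so mirror the reloaded value there as well.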
regNumber intArgReg = compiler->getCallArgIntRegister(argReg);
inst_Mov(TYP_LONG, intArgReg, argReg, /* canSkip */ false, emitActualTypeSize(loadType));
}
}
// If initReg is one of RBM_CALLEE_TRASH, then it needs to be zero'ed before using.
if ((RBM_CALLEE_TRASH & genRegMask(initReg)) != 0)
{
*pInitRegZeroed = false;
}
#else // !defined(UNIX_AMD64_ABI)
// Emit profiler EnterCallback(ProfilerMethHnd, caller's SP)
// R14 = ProfilerMethHnd
if (compiler->compProfilerMethHndIndirected)
{
// Profiler hooks enabled during Ngen time.
// Profiler handle needs to be accessed through an indirection of a pointer.
GetEmitter()->emitIns_R_AI(INS_mov, EA_PTR_DSP_RELOC, REG_PROFILER_ENTER_ARG_0,
(ssize_t)compiler->compProfilerMethHnd);
}
else
{
instGen_Set_Reg_To_Imm(EA_PTRSIZE, REG_PROFILER_ENTER_ARG_0, (ssize_t)compiler->compProfilerMethHnd);
}
// R15 = caller's SP
// Notes
// 1) Here we can query caller's SP offset since prolog will be generated after final frame layout.
// 2) caller's SP relative offset to FramePointer will be negative. We need to add absolute value
// of that offset to FramePointer to obtain caller's SP value.
assert(compiler->lvaOutgoingArgSpaceVar != BAD_VAR_NUM);
int callerSPOffset = compiler->lvaToCallerSPRelativeOffset(0, isFramePointerUsed());
GetEmitter()->emitIns_R_AR(INS_lea, EA_PTRSIZE, REG_PROFILER_ENTER_ARG_1, genFramePointerReg(), -callerSPOffset);
// We can use any callee trash register (other than RAX, RDI, RSI) for call target.
// We use R11 here. This will emit either
// "call ip-relative 32-bit offset" or
// "mov r11, helper addr; call r11"
genEmitHelperCall(CORINFO_HELP_PROF_FCN_ENTER, 0, EA_UNKNOWN, REG_DEFAULT_PROFILER_CALL_TARGET);
// If initReg is one of RBM_CALLEE_TRASH, then it needs to be zero'ed before using.
if ((RBM_CALLEE_TRASH & genRegMask(initReg)) != 0)
{
*pInitRegZeroed = false;
}
#endif // !defined(UNIX_AMD64_ABI)
}
//-----------------------------------------------------------------------------------
// genProfilingLeaveCallback: Generate the profiling function leave or tailcall callback.
// Technically, this is not part of the epilog; it is called when we are generating code for a GT_RETURN node.
//
// Arguments:
// helper - which helper to call. Either CORINFO_HELP_PROF_FCN_LEAVE or CORINFO_HELP_PROF_FCN_TAILCALL
//
// Return Value:
// None
//
void CodeGen::genProfilingLeaveCallback(unsigned helper)
{
assert((helper == CORINFO_HELP_PROF_FCN_LEAVE) || (helper == CORINFO_HELP_PROF_FCN_TAILCALL));
// Only hook if profiler says it's okay.
if (!compiler->compIsProfilerHookNeeded())
{
return;
}
compiler->info.compProfilerCallback = true;
#if !defined(UNIX_AMD64_ABI)
// Since the method needs to make a profiler callback, it should have out-going arg space allocated.
noway_assert(compiler->lvaOutgoingArgSpaceVar != BAD_VAR_NUM);
noway_assert(compiler->lvaOutgoingArgSpaceSize >= (4 * REGSIZE_BYTES));
// If thisPtr needs to be kept alive and reported, it cannot be one of the callee trash
// registers that profiler callback kills.
if (compiler->lvaKeepAliveAndReportThis() && compiler->lvaGetDesc(compiler->info.compThisArg)->lvIsInReg())
{
regMaskTP thisPtrMask = genRegMask(compiler->lvaGetDesc(compiler->info.compThisArg)->GetRegNum());
noway_assert((RBM_PROFILER_LEAVE_TRASH & thisPtrMask) == 0);
}
// At this point return value is computed and stored in RAX or XMM0.
// On Amd64, Leave callback preserves the return register. We keep
// RAX alive by not reporting as trashed by helper call. Also note
// that GC cannot kick-in while executing inside profiler callback,
// which is a requirement of profiler as well since it needs to examine
// return value which could be an obj ref.
// RCX = ProfilerMethHnd
if (compiler->compProfilerMethHndIndirected)
{
// Profiler hooks enabled during Ngen time.
// Profiler handle needs to be accessed through an indirection of an address.
GetEmitter()->emitIns_R_AI(INS_mov, EA_PTR_DSP_RELOC, REG_ARG_0, (ssize_t)compiler->compProfilerMethHnd);
}
else
{
instGen_Set_Reg_To_Imm(EA_PTRSIZE, REG_ARG_0, (ssize_t)compiler->compProfilerMethHnd);
}
// RDX = caller's SP
// TODO-AMD64-Cleanup: Once we start doing codegen after final frame layout, retain the "if" portion
    // of the statements to execute unconditionally and clean up the rest.
if (compiler->lvaDoneFrameLayout == Compiler::FINAL_FRAME_LAYOUT)
{
// Caller's SP relative offset to FramePointer will be negative. We need to add absolute
// value of that offset to FramePointer to obtain caller's SP value.
int callerSPOffset = compiler->lvaToCallerSPRelativeOffset(0, isFramePointerUsed());
GetEmitter()->emitIns_R_AR(INS_lea, EA_PTRSIZE, REG_ARG_1, genFramePointerReg(), -callerSPOffset);
}
else
{
        // If we are here, it means this is a tentative frame layout, during which we
        // cannot use the caller's SP offset since it is only an estimate. For now we
        // require the method to have at least a single arg so that we can use it to
        // obtain the caller's SP.
LclVarDsc* varDsc = compiler->lvaGetDesc(0U);
NYI_IF((varDsc == nullptr) || !varDsc->lvIsParam, "Profiler ELT callback for a method without any params");
// lea rdx, [FramePointer + Arg0's offset]
GetEmitter()->emitIns_R_S(INS_lea, EA_PTRSIZE, REG_ARG_1, 0, 0);
}
// We can use any callee trash register (other than RAX, RCX, RDX) for call target.
// We use R8 here. This will emit either
// "call ip-relative 32-bit offset" or
// "mov r8, helper addr; call r8"
genEmitHelperCall(helper, 0, EA_UNKNOWN, REG_ARG_2);
#else // !defined(UNIX_AMD64_ABI)
// RDI = ProfilerMethHnd
if (compiler->compProfilerMethHndIndirected)
{
GetEmitter()->emitIns_R_AI(INS_mov, EA_PTR_DSP_RELOC, REG_ARG_0, (ssize_t)compiler->compProfilerMethHnd);
}
else
{
instGen_Set_Reg_To_Imm(EA_PTRSIZE, REG_ARG_0, (ssize_t)compiler->compProfilerMethHnd);
}
// RSI = caller's SP
if (compiler->lvaDoneFrameLayout == Compiler::FINAL_FRAME_LAYOUT)
{
int callerSPOffset = compiler->lvaToCallerSPRelativeOffset(0, isFramePointerUsed());
GetEmitter()->emitIns_R_AR(INS_lea, EA_PTRSIZE, REG_ARG_1, genFramePointerReg(), -callerSPOffset);
}
else
{
LclVarDsc* varDsc = compiler->lvaGetDesc(0U);
NYI_IF((varDsc == nullptr) || !varDsc->lvIsParam, "Profiler ELT callback for a method without any params");
        // lea rsi, [FramePointer + Arg0's offset]
GetEmitter()->emitIns_R_S(INS_lea, EA_PTRSIZE, REG_ARG_1, 0, 0);
}
// We can use any callee trash register (other than RAX, RDI, RSI) for call target.
// We use R11 here. This will emit either
// "call ip-relative 32-bit offset" or
// "mov r11, helper addr; call r11"
genEmitHelperCall(helper, 0, EA_UNKNOWN, REG_DEFAULT_PROFILER_CALL_TARGET);
#endif // !defined(UNIX_AMD64_ABI)
}
#endif // TARGET_AMD64
#endif // PROFILING_SUPPORTED
#ifdef TARGET_AMD64
//------------------------------------------------------------------------
// genOSRRecordTier0CalleeSavedRegistersAndFrame: for OSR methods, record the
// subset of callee saves already saved by the Tier0 method, and the frame
// created by Tier0.
//
void CodeGen::genOSRRecordTier0CalleeSavedRegistersAndFrame()
{
assert(compiler->compGeneratingProlog);
assert(compiler->opts.IsOSR());
assert(compiler->funCurrentFunc()->funKind == FuncKind::FUNC_ROOT);
#if ETW_EBP_FRAMED
if (!isFramePointerUsed() && regSet.rsRegsModified(RBM_FPBASE))
{
noway_assert(!"Used register RBM_FPBASE as a scratch register!");
}
#endif
// Figure out which set of int callee saves was already saved by Tier0.
// Emit appropriate unwind.
//
PatchpointInfo* const patchpointInfo = compiler->info.compPatchpointInfo;
regMaskTP const tier0CalleeSaves = (regMaskTP)patchpointInfo->CalleeSaveRegisters();
regMaskTP tier0IntCalleeSaves = tier0CalleeSaves & RBM_OSR_INT_CALLEE_SAVED;
int const tier0IntCalleeSaveUsedSize = genCountBits(tier0IntCalleeSaves) * REGSIZE_BYTES;
JITDUMP("--OSR--- tier0 has already saved ");
JITDUMPEXEC(dspRegMask(tier0IntCalleeSaves));
JITDUMP("\n");
// We must account for the Tier0 callee saves.
//
// These have already happened at method entry; all these
// unwind records should be at offset 0.
//
    // RBP is always saved by Tier0 and always pushed first.
//
assert((tier0IntCalleeSaves & RBM_FPBASE) == RBM_FPBASE);
compiler->unwindPush(REG_RBP);
tier0IntCalleeSaves &= ~RBM_FPBASE;
// Now the rest of the Tier0 callee saves.
//
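    // Walk from the highest integer register downward, the same order genPushCalleeSavedRegisters
    // uses, so the unwind pushes are recorded in the order the Tier0 prolog pushed the registers.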
for (regNumber reg = REG_INT_LAST; tier0IntCalleeSaves != RBM_NONE; reg = REG_PREV(reg))
{
regMaskTP regBit = genRegMask(reg);
if ((regBit & tier0IntCalleeSaves) != 0)
{
compiler->unwindPush(reg);
}
tier0IntCalleeSaves &= ~regBit;
}
// We must account for the post-callee-saves push SP movement
// done by the Tier0 frame and by the OSR transition.
//
// tier0FrameSize is the Tier0 FP-SP delta plus the fake call slot added by
// JIT_Patchpoint. We add one slot to account for the saved FP.
//
// We then need to subtract off the size the Tier0 callee saves as SP
// adjusts for those will have been modelled by the unwind pushes above.
//
int const tier0FrameSize = patchpointInfo->TotalFrameSize() + REGSIZE_BYTES;
int const tier0NetSize = tier0FrameSize - tier0IntCalleeSaveUsedSize;
compiler->unwindAllocStack(tier0NetSize);
}
//------------------------------------------------------------------------
// genOSRSaveRemainingCalleeSavedRegisters: save any callee save registers
// that Tier0 didn't save.
//
// Notes:
// This must be invoked after SP has been adjusted to allocate the local
// frame, because of how the UnwindSave records are interpreted.
//
// We rely on the fact that other "local frame" allocation actions (like
// stack probing) will not trash callee saves registers.
//
void CodeGen::genOSRSaveRemainingCalleeSavedRegisters()
{
// We should be generating the prolog of an OSR root frame.
//
assert(compiler->compGeneratingProlog);
assert(compiler->opts.IsOSR());
assert(compiler->funCurrentFunc()->funKind == FuncKind::FUNC_ROOT);
// x86/x64 doesn't support push of xmm/ymm regs, therefore consider only integer registers for pushing onto stack
// here. Space for float registers to be preserved is stack allocated and saved as part of prolog sequence and not
// here.
regMaskTP rsPushRegs = regSet.rsGetModifiedRegsMask() & RBM_OSR_INT_CALLEE_SAVED;
#if ETW_EBP_FRAMED
if (!isFramePointerUsed() && regSet.rsRegsModified(RBM_FPBASE))
{
noway_assert(!"Used register RBM_FPBASE as a scratch register!");
}
#endif
// Figure out which set of int callee saves still needs saving.
//
PatchpointInfo* const patchpointInfo = compiler->info.compPatchpointInfo;
regMaskTP const tier0CalleeSaves = (regMaskTP)patchpointInfo->CalleeSaveRegisters();
regMaskTP tier0IntCalleeSaves = tier0CalleeSaves & RBM_OSR_INT_CALLEE_SAVED;
unsigned const tier0IntCalleeSaveUsedSize = genCountBits(tier0IntCalleeSaves) * REGSIZE_BYTES;
regMaskTP const osrIntCalleeSaves = rsPushRegs & RBM_OSR_INT_CALLEE_SAVED;
regMaskTP osrAdditionalIntCalleeSaves = osrIntCalleeSaves & ~tier0IntCalleeSaves;
JITDUMP("---OSR--- int callee saves are ");
JITDUMPEXEC(dspRegMask(osrIntCalleeSaves));
JITDUMP("; tier0 already saved ");
JITDUMPEXEC(dspRegMask(tier0IntCalleeSaves));
JITDUMP("; so only saving ");
JITDUMPEXEC(dspRegMask(osrAdditionalIntCalleeSaves));
JITDUMP("\n");
// These remaining callee saves will be stored in the Tier0 callee save area
// below any saves already done by Tier0. Compute the offset.
//
// The OSR method doesn't actually use its callee save area.
//
int const osrFrameSize = compiler->compLclFrameSize;
int const tier0FrameSize = patchpointInfo->TotalFrameSize();
int const osrCalleeSaveSize = compiler->compCalleeRegsPushed * REGSIZE_BYTES;
int const osrFramePointerSize = isFramePointerUsed() ? REGSIZE_BYTES : 0;
int offset = osrFrameSize + osrCalleeSaveSize + osrFramePointerSize + tier0FrameSize - tier0IntCalleeSaveUsedSize;
// The tier0 frame is always an RBP frame, so the OSR method should never need to save RBP.
//
assert((tier0CalleeSaves & RBM_FPBASE) == RBM_FPBASE);
assert((osrAdditionalIntCalleeSaves & RBM_FPBASE) == RBM_NONE);
// The OSR method must use MOVs to save additional callee saves.
//
for (regNumber reg = REG_INT_LAST; osrAdditionalIntCalleeSaves != RBM_NONE; reg = REG_PREV(reg))
{
regMaskTP regBit = genRegMask(reg);
if ((regBit & osrAdditionalIntCalleeSaves) != 0)
{
GetEmitter()->emitIns_AR_R(INS_mov, EA_8BYTE, reg, REG_SPBASE, offset);
compiler->unwindSaveReg(reg, offset);
offset -= REGSIZE_BYTES;
}
osrAdditionalIntCalleeSaves &= ~regBit;
}
}
#endif // TARGET_AMD64
//------------------------------------------------------------------------
// genPushCalleeSavedRegisters: Push any callee-saved registers we have used.
//
void CodeGen::genPushCalleeSavedRegisters()
{
assert(compiler->compGeneratingProlog);
#if DEBUG
// OSR root frames must handle this differently. See
// genOSRRecordTier0CalleeSavedRegisters()
// genOSRSaveRemainingCalleeSavedRegisters()
//
if (compiler->opts.IsOSR())
{
assert(compiler->funCurrentFunc()->funKind != FuncKind::FUNC_ROOT);
}
#endif
// x86/x64 doesn't support push of xmm/ymm regs, therefore consider only integer registers for pushing onto stack
// here. Space for float registers to be preserved is stack allocated and saved as part of prolog sequence and not
// here.
regMaskTP rsPushRegs = regSet.rsGetModifiedRegsMask() & RBM_INT_CALLEE_SAVED;
#if ETW_EBP_FRAMED
if (!isFramePointerUsed() && regSet.rsRegsModified(RBM_FPBASE))
{
noway_assert(!"Used register RBM_FPBASE as a scratch register!");
}
#endif
// On X86/X64 we have already pushed the FP (frame-pointer) prior to calling this method
if (isFramePointerUsed())
{
rsPushRegs &= ~RBM_FPBASE;
}
#ifdef DEBUG
if (compiler->compCalleeRegsPushed != genCountBits(rsPushRegs))
{
printf("Error: unexpected number of callee-saved registers to push. Expected: %d. Got: %d ",
compiler->compCalleeRegsPushed, genCountBits(rsPushRegs));
dspRegMask(rsPushRegs);
printf("\n");
assert(compiler->compCalleeRegsPushed == genCountBits(rsPushRegs));
}
#endif // DEBUG
// Push backwards so we match the order we will pop them in the epilog
// and all the other code that expects it to be in this order.
for (regNumber reg = REG_INT_LAST; rsPushRegs != RBM_NONE; reg = REG_PREV(reg))
{
regMaskTP regBit = genRegMask(reg);
if ((regBit & rsPushRegs) != 0)
{
inst_RV(INS_push, reg, TYP_REF);
compiler->unwindPush(reg);
#ifdef USING_SCOPE_INFO
if (!doubleAlignOrFramePointerUsed())
{
psiAdjustStackLevel(REGSIZE_BYTES);
}
#endif // USING_SCOPE_INFO
rsPushRegs &= ~regBit;
}
}
}
void CodeGen::genPopCalleeSavedRegisters(bool jmpEpilog)
{
assert(compiler->compGeneratingEpilog);
#ifdef TARGET_AMD64
const bool isFunclet = compiler->funCurrentFunc()->funKind != FuncKind::FUNC_ROOT;
const bool doesSupersetOfNormalPops = compiler->opts.IsOSR() && !isFunclet;
// OSR methods must restore all registers saved by either the OSR or
// the Tier0 method. First restore any callee save not saved by
// Tier0, then the callee saves done by Tier0.
//
// OSR funclets do normal restores.
//
if (doesSupersetOfNormalPops)
{
regMaskTP rsPopRegs = regSet.rsGetModifiedRegsMask() & RBM_OSR_INT_CALLEE_SAVED;
regMaskTP tier0CalleeSaves =
((regMaskTP)compiler->info.compPatchpointInfo->CalleeSaveRegisters()) & RBM_OSR_INT_CALLEE_SAVED;
regMaskTP additionalCalleeSaves = rsPopRegs & ~tier0CalleeSaves;
// Registers saved by the OSR prolog.
//
genPopCalleeSavedRegistersFromMask(additionalCalleeSaves);
// Registers saved by the Tier0 prolog.
// Tier0 frame pointer will be restored separately.
//
genPopCalleeSavedRegistersFromMask(tier0CalleeSaves & ~RBM_FPBASE);
return;
}
#endif // TARGET_AMD64
// Registers saved by a normal prolog
//
regMaskTP rsPopRegs = regSet.rsGetModifiedRegsMask() & RBM_INT_CALLEE_SAVED;
const unsigned popCount = genPopCalleeSavedRegistersFromMask(rsPopRegs);
noway_assert(compiler->compCalleeRegsPushed == popCount);
}
//------------------------------------------------------------------------
// genPopCalleeSavedRegistersFromMask: pop specified set of callee saves
// in the "standard" order
//
unsigned CodeGen::genPopCalleeSavedRegistersFromMask(regMaskTP rsPopRegs)
{
unsigned popCount = 0;
if ((rsPopRegs & RBM_EBX) != 0)
{
popCount++;
inst_RV(INS_pop, REG_EBX, TYP_I_IMPL);
}
if ((rsPopRegs & RBM_FPBASE) != 0)
{
// EBP cannot be directly modified for EBP frame and double-aligned frames
assert(!doubleAlignOrFramePointerUsed());
popCount++;
inst_RV(INS_pop, REG_EBP, TYP_I_IMPL);
}
#ifndef UNIX_AMD64_ABI
// For System V AMD64 calling convention ESI and EDI are volatile registers.
if ((rsPopRegs & RBM_ESI) != 0)
{
popCount++;
inst_RV(INS_pop, REG_ESI, TYP_I_IMPL);
}
if ((rsPopRegs & RBM_EDI) != 0)
{
popCount++;
inst_RV(INS_pop, REG_EDI, TYP_I_IMPL);
}
#endif // !defined(UNIX_AMD64_ABI)
#ifdef TARGET_AMD64
if ((rsPopRegs & RBM_R12) != 0)
{
popCount++;
inst_RV(INS_pop, REG_R12, TYP_I_IMPL);
}
if ((rsPopRegs & RBM_R13) != 0)
{
popCount++;
inst_RV(INS_pop, REG_R13, TYP_I_IMPL);
}
if ((rsPopRegs & RBM_R14) != 0)
{
popCount++;
inst_RV(INS_pop, REG_R14, TYP_I_IMPL);
}
if ((rsPopRegs & RBM_R15) != 0)
{
popCount++;
inst_RV(INS_pop, REG_R15, TYP_I_IMPL);
}
#endif // TARGET_AMD64
// Amd64/x86 doesn't support push/pop of xmm registers.
// These will get saved to stack separately after allocating
// space on stack in prolog sequence. PopCount is essentially
// tracking the count of integer registers pushed.
return popCount;
}
/*****************************************************************************
*
* Generates code for a function epilog.
*
* Please consult the "debugger team notification" comment in genFnProlog().
*/
void CodeGen::genFnEpilog(BasicBlock* block)
{
#ifdef DEBUG
if (verbose)
{
printf("*************** In genFnEpilog()\n");
}
#endif
ScopedSetVariable<bool> _setGeneratingEpilog(&compiler->compGeneratingEpilog, true);
VarSetOps::Assign(compiler, gcInfo.gcVarPtrSetCur, GetEmitter()->emitInitGCrefVars);
gcInfo.gcRegGCrefSetCur = GetEmitter()->emitInitGCrefRegs;
gcInfo.gcRegByrefSetCur = GetEmitter()->emitInitByrefRegs;
noway_assert(!compiler->opts.MinOpts() || isFramePointerUsed()); // FPO not allowed with minOpts
#ifdef DEBUG
genInterruptibleUsed = true;
#endif
bool jmpEpilog = ((block->bbFlags & BBF_HAS_JMP) != 0);
#ifdef DEBUG
if (compiler->opts.dspCode)
{
printf("\n__epilog:\n");
}
if (verbose)
{
printf("gcVarPtrSetCur=%s ", VarSetOps::ToString(compiler, gcInfo.gcVarPtrSetCur));
dumpConvertedVarSet(compiler, gcInfo.gcVarPtrSetCur);
printf(", gcRegGCrefSetCur=");
printRegMaskInt(gcInfo.gcRegGCrefSetCur);
GetEmitter()->emitDispRegSet(gcInfo.gcRegGCrefSetCur);
printf(", gcRegByrefSetCur=");
printRegMaskInt(gcInfo.gcRegByrefSetCur);
GetEmitter()->emitDispRegSet(gcInfo.gcRegByrefSetCur);
printf("\n");
}
#endif
// Restore float registers that were saved to stack before SP is modified.
genRestoreCalleeSavedFltRegs(compiler->compLclFrameSize);
#ifdef JIT32_GCENCODER
// When using the JIT32 GC encoder, we do not start the OS-reported portion of the epilog until after
// the above call to `genRestoreCalleeSavedFltRegs` because that function
// a) does not actually restore any registers: there are none when targeting the Windows x86 ABI,
// which is the only target that uses the JIT32 GC encoder
// b) may issue a `vzeroupper` instruction to eliminate AVX -> SSE transition penalties.
// Because the `vzeroupper` instruction is not recognized by the VM's unwinder and there are no
// callee-save FP restores that the unwinder would need to see, we can avoid the need to change the
// unwinder (and break binary compat with older versions of the runtime) by starting the epilog
// after any `vzeroupper` instruction has been emitted. If either of the above conditions changes,
// we will need to rethink this.
GetEmitter()->emitStartEpilog();
#endif
/* Compute the size in bytes we've pushed/popped */
bool removeEbpFrame = doubleAlignOrFramePointerUsed();
#ifdef TARGET_AMD64
// We only remove the EBP frame using the frame pointer (using `lea rsp, [rbp + const]`)
// if we reported the frame pointer in the prolog. The Windows x64 unwinding ABI specifically
// disallows this `lea` form:
//
// See https://docs.microsoft.com/en-us/cpp/build/prolog-and-epilog?view=msvc-160#epilog-code
//
// "When a frame pointer is not used, the epilog must use add RSP,constant to deallocate the fixed part of the
// stack. It may not use lea RSP,constant[RSP] instead. This restriction exists so the unwind code has fewer
// patterns to recognize when searching for epilogs."
//
// Otherwise, we must use `add RSP, constant`, as stated. So, we need to use the same condition
// as genFnProlog() used in determining whether to report the frame pointer in the unwind data.
// This is a subset of the `doubleAlignOrFramePointerUsed()` cases.
//
if (removeEbpFrame)
{
const bool reportUnwindData = compiler->compLocallocUsed || compiler->opts.compDbgEnC;
removeEbpFrame = removeEbpFrame && reportUnwindData;
}
#endif // TARGET_AMD64
if (!removeEbpFrame)
{
        // We have an ESP frame
noway_assert(compiler->compLocallocUsed == false); // Only used with frame-pointer
/* Get rid of our local variables */
unsigned int frameSize = compiler->compLclFrameSize;
#ifdef TARGET_AMD64
// OSR must remove the entire OSR frame and the Tier0 frame down to the bottom
// of the used part of the Tier0 callee save area.
//
if (compiler->opts.IsOSR())
{
// The patchpoint TotalFrameSize is SP-FP delta (plus "call" slot added by JIT_Patchpoint)
// so does not account for the Tier0 push of FP, so we add in an extra stack slot to get the
// offset to the top of the Tier0 callee saves area.
//
PatchpointInfo* const patchpointInfo = compiler->info.compPatchpointInfo;
regMaskTP const tier0CalleeSaves = (regMaskTP)patchpointInfo->CalleeSaveRegisters();
regMaskTP const tier0IntCalleeSaves = tier0CalleeSaves & RBM_OSR_INT_CALLEE_SAVED;
regMaskTP const osrIntCalleeSaves = regSet.rsGetModifiedRegsMask() & RBM_OSR_INT_CALLEE_SAVED;
regMaskTP const allIntCalleeSaves = osrIntCalleeSaves | tier0IntCalleeSaves;
unsigned const tier0FrameSize = patchpointInfo->TotalFrameSize() + REGSIZE_BYTES;
unsigned const tier0IntCalleeSaveUsedSize = genCountBits(allIntCalleeSaves) * REGSIZE_BYTES;
unsigned const osrCalleeSaveSize = compiler->compCalleeRegsPushed * REGSIZE_BYTES;
unsigned const osrFramePointerSize = isFramePointerUsed() ? REGSIZE_BYTES : 0;
unsigned const osrAdjust =
tier0FrameSize - tier0IntCalleeSaveUsedSize + osrCalleeSaveSize + osrFramePointerSize;
JITDUMP("OSR epilog adjust factors: tier0 frame %u, tier0 callee saves -%u, osr callee saves %u, osr "
"framePointer %u\n",
tier0FrameSize, tier0IntCalleeSaveUsedSize, osrCalleeSaveSize, osrFramePointerSize);
JITDUMP(" OSR frame size %u; net osr adjust %u, result %u\n", frameSize, osrAdjust,
frameSize + osrAdjust);
frameSize += osrAdjust;
}
#endif // TARGET_AMD64
if (frameSize > 0)
{
#ifdef TARGET_X86
/* Add 'compiler->compLclFrameSize' to ESP */
/* Use pop ECX to increment ESP by 4, unless compiler->compJmpOpUsed is true */
if ((frameSize == TARGET_POINTER_SIZE) && !compiler->compJmpOpUsed)
{
inst_RV(INS_pop, REG_ECX, TYP_I_IMPL);
regSet.verifyRegUsed(REG_ECX);
}
else
#endif // TARGET_X86
{
/* Add 'compiler->compLclFrameSize' to ESP */
/* Generate "add esp, <stack-size>" */
inst_RV_IV(INS_add, REG_SPBASE, frameSize, EA_PTRSIZE);
}
}
genPopCalleeSavedRegisters();
#ifdef TARGET_AMD64
// In the case where we have an RSP frame, and no frame pointer reported in the OS unwind info,
// but we do have a pushed frame pointer and established frame chain, we do need to pop RBP.
//
// OSR methods must always pop RBP (pushed by Tier0 frame)
if (doubleAlignOrFramePointerUsed() || compiler->opts.IsOSR())
{
inst_RV(INS_pop, REG_EBP, TYP_I_IMPL);
}
#endif // TARGET_AMD64
}
else
{
noway_assert(doubleAlignOrFramePointerUsed());
// We don't support OSR for methods that must report an FP in unwind.
//
assert(!compiler->opts.IsOSR());
/* Tear down the stack frame */
bool needMovEspEbp = false;
#if DOUBLE_ALIGN
if (compiler->genDoubleAlign())
{
//
// add esp, compLclFrameSize
//
// We need not do anything (except the "mov esp, ebp") if
// compiler->compCalleeRegsPushed==0. However, this is unlikely, and it
// also complicates the code manager. Hence, we ignore that case.
noway_assert(compiler->compLclFrameSize != 0);
inst_RV_IV(INS_add, REG_SPBASE, compiler->compLclFrameSize, EA_PTRSIZE);
needMovEspEbp = true;
}
else
#endif // DOUBLE_ALIGN
{
bool needLea = false;
if (compiler->compLocallocUsed)
{
// OSR not yet ready for localloc
assert(!compiler->opts.IsOSR());
// ESP may be variable if a localloc was actually executed. Reset it.
// lea esp, [ebp - compiler->compCalleeRegsPushed * REGSIZE_BYTES]
needLea = true;
}
else if (!regSet.rsRegsModified(RBM_CALLEE_SAVED))
{
if (compiler->compLclFrameSize != 0)
{
#ifdef TARGET_AMD64
// AMD64 can't use "mov esp, ebp", according to the ABI specification describing epilogs. So,
// do an LEA to "pop off" the frame allocation.
needLea = true;
#else // !TARGET_AMD64
// We will just generate "mov esp, ebp" and be done with it.
needMovEspEbp = true;
#endif // !TARGET_AMD64
}
}
else if (compiler->compLclFrameSize == 0)
{
// do nothing before popping the callee-saved registers
}
#ifdef TARGET_X86
else if (compiler->compLclFrameSize == REGSIZE_BYTES)
{
// "pop ecx" will make ESP point to the callee-saved registers
inst_RV(INS_pop, REG_ECX, TYP_I_IMPL);
regSet.verifyRegUsed(REG_ECX);
}
#endif // TARGET_X86
else
{
// We need to make ESP point to the callee-saved registers
needLea = true;
}
if (needLea)
{
int offset;
#ifdef TARGET_AMD64
// lea esp, [ebp + compiler->compLclFrameSize - genSPtoFPdelta]
//
// Case 1: localloc not used.
// genSPToFPDelta = compiler->compCalleeRegsPushed * REGSIZE_BYTES + compiler->compLclFrameSize
// offset = compiler->compCalleeRegsPushed * REGSIZE_BYTES;
// The amount to be subtracted from RBP to point at callee saved int regs.
//
// Case 2: localloc used
// genSPToFPDelta = Min(240, (int)compiler->lvaOutgoingArgSpaceSize)
// Offset = Amount to be added to RBP to point at callee saved int regs.
offset = genSPtoFPdelta() - compiler->compLclFrameSize;
// Offset should fit within a byte if localloc is not used.
if (!compiler->compLocallocUsed)
{
noway_assert(offset < UCHAR_MAX);
}
#else
// lea esp, [ebp - compiler->compCalleeRegsPushed * REGSIZE_BYTES]
offset = compiler->compCalleeRegsPushed * REGSIZE_BYTES;
noway_assert(offset < UCHAR_MAX); // the offset fits in a byte
#endif
GetEmitter()->emitIns_R_AR(INS_lea, EA_PTRSIZE, REG_SPBASE, REG_FPBASE, -offset);
}
}
//
// Pop the callee-saved registers (if any)
//
genPopCalleeSavedRegisters();
#ifdef TARGET_AMD64
// Extra OSR adjust to get to where RBP was saved by the tier0 frame.
//
// Note the other callee saves made in that frame are dead, the current method
// will save and restore what it needs.
if (compiler->opts.IsOSR())
{
PatchpointInfo* const patchpointInfo = compiler->info.compPatchpointInfo;
const int tier0FrameSize = patchpointInfo->TotalFrameSize();
// Use add since we know the SP-to-FP delta of the original method.
// We also need to skip over the slot where we pushed RBP.
//
// If we ever allow the original method to have localloc this will
// need to change.
inst_RV_IV(INS_add, REG_SPBASE, tier0FrameSize + TARGET_POINTER_SIZE, EA_PTRSIZE);
}
assert(!needMovEspEbp); // "mov esp, ebp" is not allowed in AMD64 epilogs
#else // !TARGET_AMD64
if (needMovEspEbp)
{
// mov esp, ebp
inst_Mov(TYP_I_IMPL, REG_SPBASE, REG_FPBASE, /* canSkip */ false);
}
#endif // !TARGET_AMD64
// pop ebp
inst_RV(INS_pop, REG_EBP, TYP_I_IMPL);
}
GetEmitter()->emitStartExitSeq(); // Mark the start of the "return" sequence
/* Check if this a special return block i.e.
* CEE_JMP instruction */
if (jmpEpilog)
{
noway_assert(block->bbJumpKind == BBJ_RETURN);
noway_assert(block->GetFirstLIRNode());
// figure out what jump we have
GenTree* jmpNode = block->lastNode();
#if !FEATURE_FASTTAILCALL
// x86
noway_assert(jmpNode->gtOper == GT_JMP);
#else
// amd64
// If jmpNode is GT_JMP then gtNext must be null.
// If jmpNode is a fast tail call, gtNext need not be null since it could have embedded stmts.
noway_assert((jmpNode->gtOper != GT_JMP) || (jmpNode->gtNext == nullptr));
// Could either be a "jmp method" or "fast tail call" implemented as epilog+jmp
noway_assert((jmpNode->gtOper == GT_JMP) ||
((jmpNode->gtOper == GT_CALL) && jmpNode->AsCall()->IsFastTailCall()));
// The next block is associated with this "if" stmt
if (jmpNode->gtOper == GT_JMP)
#endif
{
// Simply emit a jump to the methodHnd. This is similar to a call so we can use
// the same descriptor with some minor adjustments.
CORINFO_METHOD_HANDLE methHnd = (CORINFO_METHOD_HANDLE)jmpNode->AsVal()->gtVal1;
CORINFO_CONST_LOOKUP addrInfo;
compiler->info.compCompHnd->getFunctionEntryPoint(methHnd, &addrInfo);
if (addrInfo.accessType != IAT_VALUE && addrInfo.accessType != IAT_PVALUE)
{
NO_WAY("Unsupported JMP indirection");
}
// If we have IAT_PVALUE we might need to jump via register indirect, as sometimes the
// indirection cell can't be reached by the jump.
emitter::EmitCallType callType;
void* addr;
regNumber indCallReg;
if (addrInfo.accessType == IAT_PVALUE)
{
if (genCodeIndirAddrCanBeEncodedAsPCRelOffset((size_t)addrInfo.addr))
{
// 32 bit displacement will work
callType = emitter::EC_FUNC_TOKEN_INDIR;
addr = addrInfo.addr;
indCallReg = REG_NA;
}
else
{
// 32 bit displacement won't work
callType = emitter::EC_INDIR_ARD;
indCallReg = REG_RAX;
addr = nullptr;
instGen_Set_Reg_To_Imm(EA_HANDLE_CNS_RELOC, indCallReg, (ssize_t)addrInfo.addr);
regSet.verifyRegUsed(indCallReg);
}
}
else
{
callType = emitter::EC_FUNC_TOKEN;
addr = addrInfo.addr;
indCallReg = REG_NA;
}
// clang-format off
GetEmitter()->emitIns_Call(callType,
methHnd,
INDEBUG_LDISASM_COMMA(nullptr)
addr,
0, // argSize
EA_UNKNOWN // retSize
MULTIREG_HAS_SECOND_GC_RET_ONLY_ARG(EA_UNKNOWN), // secondRetSize
gcInfo.gcVarPtrSetCur,
gcInfo.gcRegGCrefSetCur,
gcInfo.gcRegByrefSetCur,
DebugInfo(),
indCallReg, REG_NA, 0, 0, /* ireg, xreg, xmul, disp */
true /* isJump */
);
// clang-format on
}
#if FEATURE_FASTTAILCALL
else
{
genCallInstruction(jmpNode->AsCall());
}
#endif // FEATURE_FASTTAILCALL
}
else
{
unsigned stkArgSize = 0; // Zero on all platforms except x86
#if defined(TARGET_X86)
bool fCalleePop = true;
// varargs has caller pop
if (compiler->info.compIsVarArgs)
fCalleePop = false;
if (IsCallerPop(compiler->info.compCallConv))
fCalleePop = false;
if (fCalleePop)
{
noway_assert(compiler->compArgSize >= intRegState.rsCalleeRegArgCount * REGSIZE_BYTES);
stkArgSize = compiler->compArgSize - intRegState.rsCalleeRegArgCount * REGSIZE_BYTES;
noway_assert(compiler->compArgSize < 0x10000); // "ret" only has 2 byte operand
}
#ifdef UNIX_X86_ABI
    // The called function must remove the hidden address argument from the stack before returning
    // when returning a struct according to the cdecl calling convention on Linux.
    // Details: http://www.sco.com/developers/devspecs/abi386-4.pdf pages 40-43
if (compiler->info.compCallConv == CorInfoCallConvExtension::C && compiler->info.compRetBuffArg != BAD_VAR_NUM)
stkArgSize += TARGET_POINTER_SIZE;
#endif // UNIX_X86_ABI
#endif // TARGET_X86
/* Return, popping our arguments (if any) */
instGen_Return(stkArgSize);
}
}
#if defined(FEATURE_EH_FUNCLETS)
#if defined(TARGET_AMD64)
/*****************************************************************************
*
* Generates code for an EH funclet prolog.
*
* Funclets have the following incoming arguments:
*
* catch/filter-handler: rcx = InitialSP, rdx = the exception object that was caught (see GT_CATCH_ARG)
* filter: rcx = InitialSP, rdx = the exception object to filter (see GT_CATCH_ARG)
* finally/fault: rcx = InitialSP
*
* Funclets set the following registers on exit:
*
* catch/filter-handler: rax = the address at which execution should resume (see BBJ_EHCATCHRET)
* filter: rax = non-zero if the handler should handle the exception, zero otherwise (see GT_RETFILT)
* finally/fault: none
*
* The AMD64 funclet prolog sequence is:
*
* push ebp
* push callee-saved regs
* ; TODO-AMD64-CQ: We probably only need to save any callee-save registers that we actually use
* ; in the funclet. Currently, we save the same set of callee-saved regs calculated for
* ; the entire function.
* sub sp, XXX ; Establish the rest of the frame.
* ; XXX is determined by lvaOutgoingArgSpaceSize plus space for the PSP slot, aligned
* ; up to preserve stack alignment. If we push an odd number of registers, we also
* ; generate this, to keep the stack aligned.
*
* ; Fill the PSP slot, for use by the VM (it gets reported with the GC info), or by code generation of nested
* ; filters.
* ; This is not part of the "OS prolog"; it has no associated unwind data, and is not reversed in the funclet
* ; epilog.
* ; Also, re-establish the frame pointer from the PSP.
*
* mov rbp, [rcx + PSP_slot_InitialSP_offset] ; Load the PSP (InitialSP of the main function stored in the
* ; PSP of the dynamically containing funclet or function)
* mov [rsp + PSP_slot_InitialSP_offset], rbp ; store the PSP in our frame
* lea ebp, [rbp + Function_InitialSP_to_FP_delta] ; re-establish the frame pointer of the parent frame. If
* ; Function_InitialSP_to_FP_delta==0, we don't need this
* ; instruction.
*
* The epilog sequence is then:
*
* add rsp, XXX
* pop callee-saved regs ; if necessary
* pop rbp
* ret
*
* The funclet frame is thus:
*
* | |
* |-----------------------|
* | incoming |
* | arguments |
* +=======================+ <---- Caller's SP
* | Return address |
* |-----------------------|
* | Saved EBP |
* |-----------------------|
* |Callee saved registers |
* |-----------------------|
* ~ possible 8 byte pad ~
* ~ for alignment ~
* |-----------------------|
* | PSP slot | // Omitted in CoreRT ABI
* |-----------------------|
* | Outgoing arg space | // this only exists if the function makes a call
* |-----------------------| <---- Initial SP
* | | |
* ~ | Stack grows ~
* | | downward |
* V
*
* TODO-AMD64-Bug?: the frame pointer should really point to the PSP slot (the debugger seems to assume this
* in DacDbiInterfaceImpl::InitParentFrameInfo()), or someplace above Initial-SP. There is an AMD64
* UNWIND_INFO restriction that it must be within 240 bytes of Initial-SP. See jit64\amd64\inc\md.h
* "FRAMEPTR OFFSETS" for details.
*/
void CodeGen::genFuncletProlog(BasicBlock* block)
{
#ifdef DEBUG
if (verbose)
{
printf("*************** In genFuncletProlog()\n");
}
#endif
assert(!regSet.rsRegsModified(RBM_FPBASE));
assert(block != nullptr);
assert(block->bbFlags & BBF_FUNCLET_BEG);
assert(isFramePointerUsed());
ScopedSetVariable<bool> _setGeneratingProlog(&compiler->compGeneratingProlog, true);
gcInfo.gcResetForBB();
compiler->unwindBegProlog();
// We need to push ebp, since it's callee-saved.
// We need to push the callee-saved registers. We only need to push the ones that we need, but we don't
// keep track of that on a per-funclet basis, so we push the same set as in the main function.
// The only fixed-size frame we need to allocate is whatever is big enough for the PSPSym, since nothing else
// is stored here (all temps are allocated in the parent frame).
// We do need to allocate the outgoing argument space, in case there are calls here. This must be the same
// size as the parent frame's outgoing argument space, to keep the PSPSym offset the same.
inst_RV(INS_push, REG_FPBASE, TYP_REF);
compiler->unwindPush(REG_FPBASE);
// Callee saved int registers are pushed to stack.
genPushCalleeSavedRegisters();
regMaskTP maskArgRegsLiveIn;
if ((block->bbCatchTyp == BBCT_FINALLY) || (block->bbCatchTyp == BBCT_FAULT))
{
maskArgRegsLiveIn = RBM_ARG_0;
}
else
{
maskArgRegsLiveIn = RBM_ARG_0 | RBM_ARG_2;
}
regNumber initReg = REG_EBP; // We already saved EBP, so it can be trashed
bool initRegZeroed = false;
genAllocLclFrame(genFuncletInfo.fiSpDelta, initReg, &initRegZeroed, maskArgRegsLiveIn);
// Callee saved float registers are copied to stack in their assigned stack slots
// after allocating space for them as part of funclet frame.
genPreserveCalleeSavedFltRegs(genFuncletInfo.fiSpDelta);
// This is the end of the OS-reported prolog for purposes of unwinding
compiler->unwindEndProlog();
// If there is no PSPSym (CoreRT ABI), we are done.
if (compiler->lvaPSPSym == BAD_VAR_NUM)
{
return;
}
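    // Copy the PSP (the main function's Initial-SP) from the dynamically containing frame, addressed
    // via the incoming InitialSP argument in REG_ARG_0, into RBP; store it into this funclet's PSP
    // slot; then re-derive the parent frame pointer from it if the delta is non-zero.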
GetEmitter()->emitIns_R_AR(INS_mov, EA_PTRSIZE, REG_FPBASE, REG_ARG_0, genFuncletInfo.fiPSP_slot_InitialSP_offset);
regSet.verifyRegUsed(REG_FPBASE);
GetEmitter()->emitIns_AR_R(INS_mov, EA_PTRSIZE, REG_FPBASE, REG_SPBASE, genFuncletInfo.fiPSP_slot_InitialSP_offset);
if (genFuncletInfo.fiFunction_InitialSP_to_FP_delta != 0)
{
GetEmitter()->emitIns_R_AR(INS_lea, EA_PTRSIZE, REG_FPBASE, REG_FPBASE,
genFuncletInfo.fiFunction_InitialSP_to_FP_delta);
}
// We've modified EBP, but not really. Say that we haven't...
regSet.rsRemoveRegsModified(RBM_FPBASE);
}
/*****************************************************************************
*
* Generates code for an EH funclet epilog.
*
* Note that we don't do anything with unwind codes, because AMD64 only cares about unwind codes for the prolog.
*/
void CodeGen::genFuncletEpilog()
{
#ifdef DEBUG
if (verbose)
{
printf("*************** In genFuncletEpilog()\n");
}
#endif
ScopedSetVariable<bool> _setGeneratingEpilog(&compiler->compGeneratingEpilog, true);
// Restore callee saved XMM regs from their stack slots before modifying SP
// to position at callee saved int regs.
genRestoreCalleeSavedFltRegs(genFuncletInfo.fiSpDelta);
inst_RV_IV(INS_add, REG_SPBASE, genFuncletInfo.fiSpDelta, EA_PTRSIZE);
genPopCalleeSavedRegisters();
inst_RV(INS_pop, REG_EBP, TYP_I_IMPL);
instGen_Return(0);
}
/*****************************************************************************
*
* Capture the information used to generate the funclet prologs and epilogs.
*/
void CodeGen::genCaptureFuncletPrologEpilogInfo()
{
if (!compiler->ehAnyFunclets())
{
return;
}
    // Note that compLclFrameSize can't be used (nor can we call functions that depend on it),
// because we're not going to allocate the same size frame as the parent.
assert(isFramePointerUsed());
assert(compiler->lvaDoneFrameLayout == Compiler::FINAL_FRAME_LAYOUT); // The frame size and offsets must be
// finalized
assert(compiler->compCalleeFPRegsSavedMask != (regMaskTP)-1); // The float registers to be preserved is finalized
// Even though lvaToInitialSPRelativeOffset() depends on compLclFrameSize,
// that's ok, because we're figuring out an offset in the parent frame.
genFuncletInfo.fiFunction_InitialSP_to_FP_delta =
compiler->lvaToInitialSPRelativeOffset(0, true); // trick to find the Initial-SP-relative offset of the frame
// pointer.
assert(compiler->lvaOutgoingArgSpaceSize % REGSIZE_BYTES == 0);
#ifndef UNIX_AMD64_ABI
    // System V systems do not reserve the 4 slots for outgoing params on the stack.
assert((compiler->lvaOutgoingArgSpaceSize == 0) ||
(compiler->lvaOutgoingArgSpaceSize >= (4 * REGSIZE_BYTES))); // On AMD64, we always have 4 outgoing argument
// slots if there are any calls in the function.
#endif // UNIX_AMD64_ABI
unsigned offset = compiler->lvaOutgoingArgSpaceSize;
genFuncletInfo.fiPSP_slot_InitialSP_offset = offset;
// How much stack do we allocate in the funclet?
// We need to 16-byte align the stack.
unsigned totalFrameSize =
REGSIZE_BYTES // return address
+ REGSIZE_BYTES // pushed EBP
+ (compiler->compCalleeRegsPushed * REGSIZE_BYTES); // pushed callee-saved int regs, not including EBP
// Entire 128-bits of XMM register is saved to stack due to ABI encoding requirement.
// Copying entire XMM register to/from memory will be performant if SP is aligned at XMM_REGSIZE_BYTES boundary.
unsigned calleeFPRegsSavedSize = genCountBits(compiler->compCalleeFPRegsSavedMask) * XMM_REGSIZE_BYTES;
unsigned FPRegsPad = (calleeFPRegsSavedSize > 0) ? AlignmentPad(totalFrameSize, XMM_REGSIZE_BYTES) : 0;
unsigned PSPSymSize = (compiler->lvaPSPSym != BAD_VAR_NUM) ? REGSIZE_BYTES : 0;
totalFrameSize += FPRegsPad // Padding before pushing entire xmm regs
+ calleeFPRegsSavedSize // pushed callee-saved float regs
// below calculated 'pad' will go here
+ PSPSymSize // PSPSym
+ compiler->lvaOutgoingArgSpaceSize // outgoing arg space
;
unsigned pad = AlignmentPad(totalFrameSize, 16);
genFuncletInfo.fiSpDelta = FPRegsPad // Padding to align SP on XMM_REGSIZE_BYTES boundary
+ calleeFPRegsSavedSize // Callee saved xmm regs
+ pad + PSPSymSize // PSPSym
+ compiler->lvaOutgoingArgSpaceSize // outgoing arg space
;
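    // Note that fiSpDelta covers only the locals portion of the funclet frame (xmm save area and its
    // padding, the PSP slot, and the outgoing arg space); the return address, pushed RBP, and pushed
    // callee-saved int regs sit above it and are popped separately in the funclet epilog.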
#ifdef DEBUG
if (verbose)
{
printf("\n");
printf("Funclet prolog / epilog info\n");
printf(" Function InitialSP-to-FP delta: %d\n", genFuncletInfo.fiFunction_InitialSP_to_FP_delta);
printf(" SP delta: %d\n", genFuncletInfo.fiSpDelta);
printf(" PSP slot Initial SP offset: %d\n", genFuncletInfo.fiPSP_slot_InitialSP_offset);
}
if (compiler->lvaPSPSym != BAD_VAR_NUM)
{
assert(genFuncletInfo.fiPSP_slot_InitialSP_offset ==
compiler->lvaGetInitialSPRelativeOffset(compiler->lvaPSPSym)); // same offset used in main function and
// funclet!
}
#endif // DEBUG
}
#elif defined(TARGET_X86)
/*****************************************************************************
*
* Generates code for an EH funclet prolog.
*
*
* Funclets have the following incoming arguments:
*
* catch/filter-handler: eax = the exception object that was caught (see GT_CATCH_ARG)
* filter: eax = the exception object that was caught (see GT_CATCH_ARG)
* finally/fault: none
*
* Funclets set the following registers on exit:
*
* catch/filter-handler: eax = the address at which execution should resume (see BBJ_EHCATCHRET)
* filter: eax = non-zero if the handler should handle the exception, zero otherwise (see GT_RETFILT)
* finally/fault: none
*
* Funclet prolog/epilog sequence and funclet frame layout are TBD.
*
*/
void CodeGen::genFuncletProlog(BasicBlock* block)
{
#ifdef DEBUG
if (verbose)
{
printf("*************** In genFuncletProlog()\n");
}
#endif
ScopedSetVariable<bool> _setGeneratingProlog(&compiler->compGeneratingProlog, true);
gcInfo.gcResetForBB();
compiler->unwindBegProlog();
// This is the end of the OS-reported prolog for purposes of unwinding
compiler->unwindEndProlog();
// TODO We may need EBP restore sequence here if we introduce PSPSym
    // Add padding for 16-byte alignment
inst_RV_IV(INS_sub, REG_SPBASE, 12, EA_PTRSIZE);
}
/*****************************************************************************
*
* Generates code for an EH funclet epilog.
*/
void CodeGen::genFuncletEpilog()
{
#ifdef DEBUG
if (verbose)
{
printf("*************** In genFuncletEpilog()\n");
}
#endif
ScopedSetVariable<bool> _setGeneratingEpilog(&compiler->compGeneratingEpilog, true);
    // Remove the padding that was added for 16-byte alignment
inst_RV_IV(INS_add, REG_SPBASE, 12, EA_PTRSIZE);
instGen_Return(0);
}
/*****************************************************************************
*
* Capture the information used to generate the funclet prologs and epilogs.
*/
void CodeGen::genCaptureFuncletPrologEpilogInfo()
{
if (!compiler->ehAnyFunclets())
{
return;
}
}
#endif // TARGET_X86
void CodeGen::genSetPSPSym(regNumber initReg, bool* pInitRegZeroed)
{
assert(compiler->compGeneratingProlog);
if (compiler->lvaPSPSym == BAD_VAR_NUM)
{
return;
}
noway_assert(isFramePointerUsed()); // We need an explicit frame pointer
#if defined(TARGET_AMD64)
// The PSP sym value is Initial-SP, not Caller-SP!
// We assume that RSP is Initial-SP when this function is called. That is, the stack frame
// has been established.
//
// We generate:
// mov [rbp-20h], rsp // store the Initial-SP (our current rsp) in the PSPsym
GetEmitter()->emitIns_S_R(ins_Store(TYP_I_IMPL), EA_PTRSIZE, REG_SPBASE, compiler->lvaPSPSym, 0);
#else // TARGET*
NYI("Set function PSP sym");
#endif // TARGET*
}
#endif // FEATURE_EH_FUNCLETS
//-----------------------------------------------------------------------------
// genZeroInitFrameUsingBlockInit: architecture-specific helper for genZeroInitFrame in the case
// `genUseBlockInit` is set.
//
// Arguments:
// untrLclHi - (Untracked locals High-Offset) The upper bound offset at which the zero init
// code will end initializing memory (not inclusive).
// untrLclLo - (Untracked locals Low-Offset) The lower bound at which the zero init code will
// start zero initializing memory.
// initReg - A scratch register (that gets set to zero on some platforms).
// pInitRegZeroed - OUT parameter. *pInitRegZeroed is set to 'true' if this method sets initReg register to zero,
// 'false' if initReg was set to a non-zero value, and left unchanged if initReg was not touched.
//
void CodeGen::genZeroInitFrameUsingBlockInit(int untrLclHi, int untrLclLo, regNumber initReg, bool* pInitRegZeroed)
{
assert(compiler->compGeneratingProlog);
assert(genUseBlockInit);
assert(untrLclHi > untrLclLo);
assert(compiler->getSIMDSupportLevel() >= SIMD_SSE2_Supported);
emitter* emit = GetEmitter();
regNumber frameReg = genFramePointerReg();
regNumber zeroReg = REG_NA;
int blkSize = untrLclHi - untrLclLo;
int minSimdSize = XMM_REGSIZE_BYTES;
assert(blkSize >= 0);
noway_assert((blkSize % sizeof(int)) == 0);
// initReg is not a live incoming argument reg
assert((genRegMask(initReg) & intRegState.rsCalleeRegArgMaskLiveIn) == 0);
#if defined(TARGET_AMD64)
// We will align on x64 so we can use the aligned mov
instruction simdMov = simdAlignedMovIns();
// Aligning low, we want to move up to the next boundary
int alignedLclLo = (untrLclLo + (XMM_REGSIZE_BYTES - 1)) & -XMM_REGSIZE_BYTES;
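// For example, with XMM_REGSIZE_BYTES == 16 and untrLclLo == -40, this rounds up to alignedLclLo == -32,
// the next 16-byte boundary towards higher addresses.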
if ((untrLclLo != alignedLclLo) && (blkSize < 2 * XMM_REGSIZE_BYTES))
{
// If unaligned and smaller than 2 x SIMD size we won't bother trying to align
assert((alignedLclLo - untrLclLo) < XMM_REGSIZE_BYTES);
simdMov = simdUnalignedMovIns();
}
#else // !defined(TARGET_AMD64)
// We aren't going to try and align on x86
instruction simdMov = simdUnalignedMovIns();
int alignedLclLo = untrLclLo;
#endif // !defined(TARGET_AMD64)
if (blkSize < minSimdSize)
{
zeroReg = genGetZeroReg(initReg, pInitRegZeroed);
int i = 0;
for (; i + REGSIZE_BYTES <= blkSize; i += REGSIZE_BYTES)
{
emit->emitIns_AR_R(ins_Store(TYP_I_IMPL), EA_PTRSIZE, zeroReg, frameReg, untrLclLo + i);
}
#if defined(TARGET_AMD64)
assert((i == blkSize) || (i + (int)sizeof(int) == blkSize));
if (i != blkSize)
{
emit->emitIns_AR_R(ins_Store(TYP_INT), EA_4BYTE, zeroReg, frameReg, untrLclLo + i);
i += sizeof(int);
}
#endif // defined(TARGET_AMD64)
assert(i == blkSize);
}
else
{
// Grab a non-argument, non-callee saved XMM reg
CLANG_FORMAT_COMMENT_ANCHOR;
#ifdef UNIX_AMD64_ABI
// System V x64 first temp reg is xmm8
regNumber zeroSIMDReg = genRegNumFromMask(RBM_XMM8);
#else
// Windows first temp reg is xmm4
regNumber zeroSIMDReg = genRegNumFromMask(RBM_XMM4);
#endif // UNIX_AMD64_ABI
#if defined(TARGET_AMD64)
int alignedLclHi;
int alignmentHiBlkSize;
if ((blkSize < 2 * XMM_REGSIZE_BYTES) || (untrLclLo == alignedLclLo))
{
// Either aligned or smaller than 2 x SIMD size, so we won't try to align
// However, we still want to zero anything that is not in a 16-byte chunk at the end
int alignmentBlkSize = blkSize & -XMM_REGSIZE_BYTES;
alignmentHiBlkSize = blkSize - alignmentBlkSize;
alignedLclHi = untrLclLo + alignmentBlkSize;
alignedLclLo = untrLclLo;
blkSize = alignmentBlkSize;
assert((blkSize + alignmentHiBlkSize) == (untrLclHi - untrLclLo));
}
else
{
// We are going to align
// Aligning high, we want to move down to the previous boundary
alignedLclHi = untrLclHi & -XMM_REGSIZE_BYTES;
// Zero out the unaligned portions
alignmentHiBlkSize = untrLclHi - alignedLclHi;
int alignmentLoBlkSize = alignedLclLo - untrLclLo;
blkSize = alignedLclHi - alignedLclLo;
assert((blkSize + alignmentLoBlkSize + alignmentHiBlkSize) == (untrLclHi - untrLclLo));
assert(alignmentLoBlkSize > 0);
assert(alignmentLoBlkSize < XMM_REGSIZE_BYTES);
assert((alignedLclLo - alignmentLoBlkSize) == untrLclLo);
zeroReg = genGetZeroReg(initReg, pInitRegZeroed);
int i = 0;
for (; i + REGSIZE_BYTES <= alignmentLoBlkSize; i += REGSIZE_BYTES)
{
emit->emitIns_AR_R(ins_Store(TYP_I_IMPL), EA_PTRSIZE, zeroReg, frameReg, untrLclLo + i);
}
assert((i == alignmentLoBlkSize) || (i + (int)sizeof(int) == alignmentLoBlkSize));
if (i != alignmentLoBlkSize)
{
emit->emitIns_AR_R(ins_Store(TYP_INT), EA_4BYTE, zeroReg, frameReg, untrLclLo + i);
i += sizeof(int);
}
assert(i == alignmentLoBlkSize);
}
#else // !defined(TARGET_AMD64)
// While we aren't aligning the start, we still want to
// zero anything that is not in a 16-byte chunk at the end
int alignmentBlkSize = blkSize & -XMM_REGSIZE_BYTES;
int alignmentHiBlkSize = blkSize - alignmentBlkSize;
int alignedLclHi = untrLclLo + alignmentBlkSize;
blkSize = alignmentBlkSize;
assert((blkSize + alignmentHiBlkSize) == (untrLclHi - untrLclLo));
#endif // !defined(TARGET_AMD64)
// The loop is unrolled 3 times, so we do not move to the loop block until it
// will iterate at least once; hence the threshold is 6.
if (blkSize < (6 * XMM_REGSIZE_BYTES))
{
// Generate the following code:
//
// xorps xmm4, xmm4
// movups xmmword ptr [ebp/esp-OFFS], xmm4
// ...
// movups xmmword ptr [ebp/esp-OFFS], xmm4
// mov qword ptr [ebp/esp-OFFS], rax
emit->emitIns_R_R(INS_xorps, EA_ATTR(XMM_REGSIZE_BYTES), zeroSIMDReg, zeroSIMDReg);
int i = 0;
for (; i < blkSize; i += XMM_REGSIZE_BYTES)
{
emit->emitIns_AR_R(simdMov, EA_ATTR(XMM_REGSIZE_BYTES), zeroSIMDReg, frameReg, alignedLclLo + i);
}
assert(i == blkSize);
}
else
{
// Generate the following code:
//
// xorps xmm4, xmm4
// ;movaps xmmword ptr[ebp/esp-loOFFS], xmm4 ; alignment to 3x
// ;movaps xmmword ptr[ebp/esp-loOFFS + 10H], xmm4 ;
// mov rax, - <size> ; start offset from hi
// movaps xmmword ptr[rbp + rax + hiOFFS ], xmm4 ; <--+
// movaps xmmword ptr[rbp + rax + hiOFFS + 10H], xmm4 ; |
// movaps xmmword ptr[rbp + rax + hiOFFS + 20H], xmm4 ; | Loop
// add rax, 48 ; |
// jne SHORT -5 instr ; ---+
emit->emitIns_R_R(INS_xorps, EA_ATTR(XMM_REGSIZE_BYTES), zeroSIMDReg, zeroSIMDReg);
// How many extra don't fit into the 3x unroll
int extraSimd = (blkSize % (XMM_REGSIZE_BYTES * 3)) / XMM_REGSIZE_BYTES;
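// For example, blkSize == 128 gives 128 % 48 == 32, i.e. extraSimd == 2; the two stores emitted below
// reduce blkSize to 96, an exact multiple of the 48-byte (3x unrolled) loop body.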
if (extraSimd != 0)
{
blkSize -= XMM_REGSIZE_BYTES;
// Not a multiple of 3 so add stores at low end of block
emit->emitIns_AR_R(simdMov, EA_ATTR(XMM_REGSIZE_BYTES), zeroSIMDReg, frameReg, alignedLclLo);
if (extraSimd == 2)
{
blkSize -= XMM_REGSIZE_BYTES;
// one more store needed
emit->emitIns_AR_R(simdMov, EA_ATTR(XMM_REGSIZE_BYTES), zeroSIMDReg, frameReg,
alignedLclLo + XMM_REGSIZE_BYTES);
}
}
// Exact multiple of 3 simd lengths (or loop end condition will not be met)
noway_assert((blkSize % (3 * XMM_REGSIZE_BYTES)) == 0);
// At least 3 simd lengths remain (as loop is 3x unrolled and we want it to loop at least once)
assert(blkSize >= (3 * XMM_REGSIZE_BYTES));
// In range at start of loop
assert((alignedLclHi - blkSize) >= untrLclLo);
assert(((alignedLclHi - blkSize) + (XMM_REGSIZE_BYTES * 2)) < (untrLclHi - XMM_REGSIZE_BYTES));
// In range at end of loop
assert((alignedLclHi - (3 * XMM_REGSIZE_BYTES) + (2 * XMM_REGSIZE_BYTES)) <=
(untrLclHi - XMM_REGSIZE_BYTES));
assert((alignedLclHi - (blkSize + extraSimd * XMM_REGSIZE_BYTES)) == alignedLclLo);
// Set loop counter
emit->emitIns_R_I(INS_mov, EA_PTRSIZE, initReg, -(ssize_t)blkSize);
// Loop start
emit->emitIns_ARX_R(simdMov, EA_ATTR(XMM_REGSIZE_BYTES), zeroSIMDReg, frameReg, initReg, 1, alignedLclHi);
emit->emitIns_ARX_R(simdMov, EA_ATTR(XMM_REGSIZE_BYTES), zeroSIMDReg, frameReg, initReg, 1,
alignedLclHi + XMM_REGSIZE_BYTES);
emit->emitIns_ARX_R(simdMov, EA_ATTR(XMM_REGSIZE_BYTES), zeroSIMDReg, frameReg, initReg, 1,
alignedLclHi + 2 * XMM_REGSIZE_BYTES);
emit->emitIns_R_I(INS_add, EA_PTRSIZE, initReg, XMM_REGSIZE_BYTES * 3);
// Loop until counter is 0
emit->emitIns_J(INS_jne, nullptr, -5);
// initReg will be zero at end of the loop
*pInitRegZeroed = true;
}
if (untrLclHi != alignedLclHi)
{
assert(alignmentHiBlkSize > 0);
assert(alignmentHiBlkSize < XMM_REGSIZE_BYTES);
assert((alignedLclHi + alignmentHiBlkSize) == untrLclHi);
zeroReg = genGetZeroReg(initReg, pInitRegZeroed);
int i = 0;
for (; i + REGSIZE_BYTES <= alignmentHiBlkSize; i += REGSIZE_BYTES)
{
emit->emitIns_AR_R(ins_Store(TYP_I_IMPL), EA_PTRSIZE, zeroReg, frameReg, alignedLclHi + i);
}
#if defined(TARGET_AMD64)
assert((i == alignmentHiBlkSize) || (i + (int)sizeof(int) == alignmentHiBlkSize));
if (i != alignmentHiBlkSize)
{
emit->emitIns_AR_R(ins_Store(TYP_INT), EA_4BYTE, zeroReg, frameReg, alignedLclHi + i);
i += sizeof(int);
}
#endif // defined(TARGET_AMD64)
assert(i == alignmentHiBlkSize);
}
}
}
// Save compCalleeFPRegsPushed with the smallest register number saved at [RSP+offset], working
// down the stack to the largest register number stored at [RSP+offset-(genCountBits(regMask)-1)*XMM_REG_SIZE]
// Here offset = 16-byte aligned offset after pushing integer registers.
//
// Params
// lclFrameSize - Fixed frame size excluding callee pushed int regs.
// non-funclet: this will be compLclFrameSize.
// funclet frames: this will be FuncletInfo.fiSpDelta.
void CodeGen::genPreserveCalleeSavedFltRegs(unsigned lclFrameSize)
{
genVzeroupperIfNeeded(false);
regMaskTP regMask = compiler->compCalleeFPRegsSavedMask;
// Only callee saved floating point registers should be in regMask
assert((regMask & RBM_FLT_CALLEE_SAVED) == regMask);
// fast path return
if (regMask == RBM_NONE)
{
return;
}
#ifdef TARGET_AMD64
unsigned firstFPRegPadding = compiler->lvaIsCalleeSavedIntRegCountEven() ? REGSIZE_BYTES : 0;
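// (Presumably the extra 8-byte pad compensates for an even number of pushed callee-saved integer registers,
// keeping the first XMM save slot 16-byte aligned; the assert below checks the resulting alignment.)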
unsigned offset = lclFrameSize - firstFPRegPadding - XMM_REGSIZE_BYTES;
// Offset is 16-byte aligned since we use movaps for preserving xmm regs.
assert((offset % 16) == 0);
instruction copyIns = ins_Copy(TYP_FLOAT);
#else // !TARGET_AMD64
unsigned offset = lclFrameSize - XMM_REGSIZE_BYTES;
instruction copyIns = INS_movupd;
#endif // !TARGET_AMD64
for (regNumber reg = REG_FLT_CALLEE_SAVED_FIRST; regMask != RBM_NONE; reg = REG_NEXT(reg))
{
regMaskTP regBit = genRegMask(reg);
if ((regBit & regMask) != 0)
{
// ABI requires us to preserve lower 128-bits of YMM register.
GetEmitter()->emitIns_AR_R(copyIns,
EA_8BYTE, // TODO-XArch-Cleanup: size specified here doesn't matter but should be
// EA_16BYTE
reg, REG_SPBASE, offset);
compiler->unwindSaveReg(reg, offset);
regMask &= ~regBit;
offset -= XMM_REGSIZE_BYTES;
}
}
}
// Save/Restore compCalleeFPRegsPushed with the smallest register number saved at [RSP+offset], working
// down the stack to the largest register number stored at [RSP+offset-(genCountBits(regMask)-1)*XMM_REG_SIZE]
// Here offset = 16-byte aligned offset after pushing integer registers.
//
// Params
// lclFrameSize - Fixed frame size excluding callee pushed int regs.
// non-funclet: this will be compLclFrameSize.
// funclet frames: this will be FuncletInfo.fiSpDelta.
void CodeGen::genRestoreCalleeSavedFltRegs(unsigned lclFrameSize)
{
regMaskTP regMask = compiler->compCalleeFPRegsSavedMask;
// Only callee saved floating point registers should be in regMask
assert((regMask & RBM_FLT_CALLEE_SAVED) == regMask);
// fast path return
if (regMask == RBM_NONE)
{
genVzeroupperIfNeeded();
return;
}
#ifdef TARGET_AMD64
unsigned firstFPRegPadding = compiler->lvaIsCalleeSavedIntRegCountEven() ? REGSIZE_BYTES : 0;
instruction copyIns = ins_Copy(TYP_FLOAT);
#else // !TARGET_AMD64
unsigned firstFPRegPadding = 0;
instruction copyIns = INS_movupd;
#endif // !TARGET_AMD64
unsigned offset;
regNumber regBase;
if (compiler->compLocallocUsed)
{
// localloc frame: use frame pointer relative offset
assert(isFramePointerUsed());
regBase = REG_FPBASE;
offset = lclFrameSize - genSPtoFPdelta() - firstFPRegPadding - XMM_REGSIZE_BYTES;
}
else
{
regBase = REG_SPBASE;
offset = lclFrameSize - firstFPRegPadding - XMM_REGSIZE_BYTES;
}
#ifdef TARGET_AMD64
// Offset is 16-byte aligned since we use movaps for restoring xmm regs
assert((offset % 16) == 0);
#endif // TARGET_AMD64
for (regNumber reg = REG_FLT_CALLEE_SAVED_FIRST; regMask != RBM_NONE; reg = REG_NEXT(reg))
{
regMaskTP regBit = genRegMask(reg);
if ((regBit & regMask) != 0)
{
// ABI requires us to restore lower 128-bits of YMM register.
GetEmitter()->emitIns_R_AR(copyIns,
EA_8BYTE, // TODO-XArch-Cleanup: size specified here doesn't matter but should be
// EA_16BYTE
reg, regBase, offset);
regMask &= ~regBit;
offset -= XMM_REGSIZE_BYTES;
}
}
genVzeroupperIfNeeded();
}
// Generate Vzeroupper instruction as needed to zero out the upper 128 bits of all YMM registers so that the
// AVX/Legacy SSE transition penalties can be avoided. This function is used in genPreserveCalleeSavedFltRegs
// (prolog) and genRestoreCalleeSavedFltRegs (epilog). Issue VZEROUPPER in Prolog if the method contains
// 128-bit or 256-bit AVX code, to avoid legacy SSE to AVX transition penalty, which could happen when native
// code contains legacy SSE code calling into JIT AVX code (e.g. reverse pinvoke). Issue VZEROUPPER in Epilog
// if the method contains 256-bit AVX code, to avoid AVX to legacy SSE transition penalty.
//
// Params
// check256bitOnly - true to check if the function contains 256-bit AVX instruction and generate Vzeroupper
// instruction, false to check if the function contains an AVX instruction (either 128-bit or 256-bit).
//
void CodeGen::genVzeroupperIfNeeded(bool check256bitOnly /* = true*/)
{
bool emitVzeroUpper = false;
if (check256bitOnly)
{
emitVzeroUpper = GetEmitter()->Contains256bitAVX();
}
else
{
emitVzeroUpper = GetEmitter()->ContainsAVX();
}
if (emitVzeroUpper)
{
assert(compiler->canUseVexEncoding());
instGen(INS_vzeroupper);
}
}
//-----------------------------------------------------------------------------------
// instGen_MemoryBarrier: Emit a MemoryBarrier instruction
//
// Arguments:
// barrierKind - kind of barrier to emit (Load-only is no-op on xarch)
//
// Notes:
// All MemoryBarriers instructions can be removed by DOTNET_JitNoMemoryBarriers=1
//
void CodeGen::instGen_MemoryBarrier(BarrierKind barrierKind)
{
#ifdef DEBUG
if (JitConfig.JitNoMemoryBarriers() == 1)
{
return;
}
#endif // DEBUG
// only full barrier needs to be emitted on Xarch
if (barrierKind == BARRIER_FULL)
{
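// Together these emit "lock or dword ptr [esp], 0": an idempotent locked RMW on the top of the stack that
// serves as a full memory barrier and is typically cheaper than mfence.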
instGen(INS_lock);
GetEmitter()->emitIns_I_AR(INS_or, EA_4BYTE, 0, REG_SPBASE, 0);
}
}
#ifdef TARGET_AMD64
// Returns relocation type hint for an addr.
// Note that there are no reloc hints on x86.
//
// Arguments
// addr - data address
//
// Returns
// relocation type hint
//
unsigned short CodeGenInterface::genAddrRelocTypeHint(size_t addr)
{
return compiler->eeGetRelocTypeHint((void*)addr);
}
#endif // TARGET_AMD64
// Return true if an absolute indirect data address can be encoded as IP-relative
// offset. Note that this method should be used only when the caller knows that
// the address is an icon value that VM has given and there is no GenTree node
// representing it. Otherwise, one should always use FitsInAddrBase().
//
// Arguments
// addr - an absolute indirect data address
//
// Returns
// true if indir data addr could be encoded as IP-relative offset.
//
bool CodeGenInterface::genDataIndirAddrCanBeEncodedAsPCRelOffset(size_t addr)
{
#ifdef TARGET_AMD64
return genAddrRelocTypeHint(addr) == IMAGE_REL_BASED_REL32;
#else
// x86: PC-relative addressing is available only for control flow instructions (jmp and call)
return false;
#endif
}
// Return true if an indirect code address can be encoded as IP-relative offset.
// Note that this method should be used only when the caller knows that the
// address is an icon value that VM has given and there is no GenTree node
// representing it. Otherwise, one should always use FitsInAddrBase().
//
// Arguments
// addr - an absolute indirect code address
//
// Returns
// true if indir code addr could be encoded as IP-relative offset.
//
bool CodeGenInterface::genCodeIndirAddrCanBeEncodedAsPCRelOffset(size_t addr)
{
#ifdef TARGET_AMD64
return genAddrRelocTypeHint(addr) == IMAGE_REL_BASED_REL32;
#else
// x86: PC-relative addressing is available only for control flow instructions (jmp and call)
return true;
#endif
}
// Return true if an indirect code address can be encoded as 32-bit displacement
// relative to zero. Note that this method should be used only when the caller
// knows that the address is an icon value that VM has given and there is no
// GenTree node representing it. Otherwise, one should always use FitsInAddrBase().
//
// Arguments
// addr - absolute indirect code address
//
// Returns
// true if absolute indir code addr could be encoded as 32-bit displacement relative to zero.
//
bool CodeGenInterface::genCodeIndirAddrCanBeEncodedAsZeroRelOffset(size_t addr)
{
return GenTreeIntConCommon::FitsInI32((ssize_t)addr);
}
// Return true if an absolute indirect code address needs a relocation recorded with VM.
//
// Arguments
// addr - an absolute indirect code address
//
// Returns
// true if indir code addr needs a relocation recorded with VM
//
bool CodeGenInterface::genCodeIndirAddrNeedsReloc(size_t addr)
{
// If generating relocatable ngen code, then all code addr should go through relocation
if (compiler->opts.compReloc)
{
return true;
}
#ifdef TARGET_AMD64
// See if the code indir addr can be encoded as 32-bit displacement relative to zero.
// We don't need a relocation in that case.
if (genCodeIndirAddrCanBeEncodedAsZeroRelOffset(addr))
{
return false;
}
// Else we need a relocation.
return true;
#else // TARGET_X86
// On x86 there is no need to record or ask for relocations during jitting,
// because all addrs fit within 32-bits.
return false;
#endif // TARGET_X86
}
// Return true if a direct code address needs to be marked as relocatable.
//
// Arguments
// addr - absolute direct code address
//
// Returns
// true if direct code addr needs a relocation recorded with VM
//
bool CodeGenInterface::genCodeAddrNeedsReloc(size_t addr)
{
// If generating relocatable ngen code, then all code addr should go through relocation
if (compiler->opts.compReloc)
{
return true;
}
#ifdef TARGET_AMD64
// By default all direct code addresses go through relocation so that VM will setup
// a jump stub if addr cannot be encoded as pc-relative offset.
return true;
#else // TARGET_X86
// On x86 there is no need for recording relocations during jitting,
// because all addrs fit within 32-bits.
return false;
#endif // TARGET_X86
}
#endif // TARGET_XARCH
| 1 |
dotnet/runtime | 66,245 | JIT: Optimize movzx after setcc | Clear target reg via `xor reg, reg` instead of relying on zero-extend after SETCC which is heavier and slower
```csharp
bool Test(int x) => x == 42;
```
```diff
; Method Test(int):bool:this
G_M55728_IG01: ;; offset=0000H
G_M55728_IG02: ;; offset=0000H
+ 33C0 xor eax, eax
83FA2A cmp edx, 42
0F94C0 sete al
- 0FB6C0 movzx rax, al
G_M55728_IG03: ;; offset=0009H
C3 ret
-; Total bytes of code: 10
+; Total bytes of code: 9
```
Nice diffs:
```
benchmarks.run.Linux.x64.checked.mch:
Total bytes of delta: -5230 (-0.03 % of base)
coreclr_tests.pmi.Linux.x64.checked.mch:
Total bytes of delta: -210860 (-0.16 % of base)
libraries.crossgen2.Linux.x64.checked.mch:
Total bytes of delta: -4420 (-0.06 % of base)
libraries.pmi.Linux.x64.checked.mch:
Total bytes of delta: -25312 (-0.05 % of base)
libraries_tests.pmi.Linux.x64.checked.mch:
Total bytes of delta: -49314 (-0.04 % of base)
``` | EgorBo | 2022-03-05T17:18:25Z | 2022-03-07T23:22:14Z | 440dfe4a7beecd7755767aa247f47af00b119383 | 5635905f134a3329a15112bd4975acef3f661eb2 | JIT: Optimize movzx after setcc. Clear target reg via `xor reg, reg` instead of relying on zero-extend after SETCC which is heavier and slower
```csharp
bool Test(int x) => x == 42;
```
```diff
; Method Test(int):bool:this
G_M55728_IG01: ;; offset=0000H
G_M55728_IG02: ;; offset=0000H
+ 33C0 xor eax, eax
83FA2A cmp edx, 42
0F94C0 sete al
- 0FB6C0 movzx rax, al
G_M55728_IG03: ;; offset=0009H
C3 ret
-; Total bytes of code: 10
+; Total bytes of code: 9
```
Nice diffs:
```
benchmarks.run.Linux.x64.checked.mch:
Total bytes of delta: -5230 (-0.03 % of base)
coreclr_tests.pmi.Linux.x64.checked.mch:
Total bytes of delta: -210860 (-0.16 % of base)
libraries.crossgen2.Linux.x64.checked.mch:
Total bytes of delta: -4420 (-0.06 % of base)
libraries.pmi.Linux.x64.checked.mch:
Total bytes of delta: -25312 (-0.05 % of base)
libraries_tests.pmi.Linux.x64.checked.mch:
Total bytes of delta: -49314 (-0.04 % of base)
``` | ./src/coreclr/jit/gentree.cpp | // Licensed to the .NET Foundation under one or more agreements.
// The .NET Foundation licenses this file to you under the MIT license.
/*XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XX XX
XX GenTree XX
XX XX
XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
*/
#include "jitpch.h"
#include "hwintrinsic.h"
#include "simd.h"
#ifdef _MSC_VER
#pragma hdrstop
#endif
/*****************************************************************************/
const unsigned char GenTree::gtOperKindTable[] = {
#define GTNODE(en, st, cm, ok) ((ok)&GTK_MASK) + GTK_COMMUTE *cm,
#include "gtlist.h"
};
#ifdef DEBUG
const GenTreeDebugOperKind GenTree::gtDebugOperKindTable[] = {
#define GTNODE(en, st, cm, ok) static_cast<GenTreeDebugOperKind>((ok)&DBK_MASK),
#include "gtlist.h"
};
#endif // DEBUG
/*****************************************************************************
*
* The types of different GenTree nodes
*/
#ifdef DEBUG
#define INDENT_SIZE 3
//--------------------------------------------
//
// IndentStack: This struct is used, along with its related enums and strings,
// to control both the indentation and the printing of arcs.
//
// Notes:
// The mode of printing is set in the Constructor, using its 'compiler' argument.
// Currently it only prints arcs when fgOrder == fgOrderLinear.
// The type of arc to print is specified by the IndentInfo enum, and is controlled
// by the caller of the Push() method.
enum IndentChars
{
ICVertical,
ICBottom,
ICTop,
ICMiddle,
ICDash,
ICTerminal,
ICError,
IndentCharCount
};
// clang-format off
// Sets of strings for different dumping options vert bot top mid dash embedded terminal error
static const char* emptyIndents[IndentCharCount] = { " ", " ", " ", " ", " ", "", "?" };
static const char* asciiIndents[IndentCharCount] = { "|", "\\", "/", "+", "-", "*", "?" };
static const char* unicodeIndents[IndentCharCount] = { "\xe2\x94\x82", "\xe2\x94\x94", "\xe2\x94\x8c", "\xe2\x94\x9c", "\xe2\x94\x80", "\xe2\x96\x8c", "?" };
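// (The escape sequences above are the UTF-8 encodings of the box-drawing glyphs U+2502, U+2514, U+250C,
// U+251C, U+2500 and the left half block U+258C.)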
// clang-format on
typedef ArrayStack<Compiler::IndentInfo> IndentInfoStack;
struct IndentStack
{
IndentInfoStack stack;
const char** indents;
// Constructor for IndentStack. Uses 'compiler' to determine the mode of printing.
IndentStack(Compiler* compiler) : stack(compiler->getAllocator(CMK_DebugOnly))
{
if (compiler->asciiTrees)
{
indents = asciiIndents;
}
else
{
indents = unicodeIndents;
}
}
// Return the depth of the current indentation.
unsigned Depth()
{
return stack.Height();
}
// Push a new indentation onto the stack, of the given type.
void Push(Compiler::IndentInfo info)
{
stack.Push(info);
}
// Pop the most recent indentation type off the stack.
Compiler::IndentInfo Pop()
{
return stack.Pop();
}
// Print the current indentation and arcs.
void print()
{
unsigned indentCount = Depth();
for (unsigned i = 0; i < indentCount; i++)
{
unsigned index = indentCount - 1 - i;
switch (stack.Top(index))
{
case Compiler::IndentInfo::IINone:
printf(" ");
break;
case Compiler::IndentInfo::IIArc:
if (index == 0)
{
printf("%s%s%s", indents[ICMiddle], indents[ICDash], indents[ICDash]);
}
else
{
printf("%s ", indents[ICVertical]);
}
break;
case Compiler::IndentInfo::IIArcBottom:
printf("%s%s%s", indents[ICBottom], indents[ICDash], indents[ICDash]);
break;
case Compiler::IndentInfo::IIArcTop:
printf("%s%s%s", indents[ICTop], indents[ICDash], indents[ICDash]);
break;
case Compiler::IndentInfo::IIError:
printf("%s%s%s", indents[ICError], indents[ICDash], indents[ICDash]);
break;
default:
unreached();
}
}
printf("%s", indents[ICTerminal]);
}
};
//------------------------------------------------------------------------
// printIndent: This is a static method which simply invokes the 'print'
// method on its 'indentStack' argument.
//
// Arguments:
// indentStack - specifies the information for the indentation & arcs to be printed
//
// Notes:
// This method exists to localize the checking for the case where indentStack is null.
static void printIndent(IndentStack* indentStack)
{
if (indentStack == nullptr)
{
return;
}
indentStack->print();
}
#endif
#if defined(DEBUG) || NODEBASH_STATS || MEASURE_NODE_SIZE || COUNT_AST_OPERS || DUMP_FLOWGRAPHS
static const char* opNames[] = {
#define GTNODE(en, st, cm, ok) #en,
#include "gtlist.h"
};
const char* GenTree::OpName(genTreeOps op)
{
assert((unsigned)op < ArrLen(opNames));
return opNames[op];
}
#endif
#if MEASURE_NODE_SIZE
static const char* opStructNames[] = {
#define GTNODE(en, st, cm, ok) #st,
#include "gtlist.h"
};
const char* GenTree::OpStructName(genTreeOps op)
{
assert((unsigned)op < ArrLen(opStructNames));
return opStructNames[op];
}
#endif
//
// We allocate tree nodes in 2 different sizes:
// - TREE_NODE_SZ_SMALL for most nodes
// - TREE_NODE_SZ_LARGE for the few nodes (such as calls) that have
// more fields and take up a lot more space.
//
/* GT_COUNT'th oper is overloaded as 'undefined oper', so allocate storage for GT_COUNT'th oper also */
/* static */
unsigned char GenTree::s_gtNodeSizes[GT_COUNT + 1];
#if NODEBASH_STATS || MEASURE_NODE_SIZE || COUNT_AST_OPERS
unsigned char GenTree::s_gtTrueSizes[GT_COUNT + 1]{
#define GTNODE(en, st, cm, ok) sizeof(st),
#include "gtlist.h"
};
#endif // NODEBASH_STATS || MEASURE_NODE_SIZE || COUNT_AST_OPERS
#if COUNT_AST_OPERS
unsigned GenTree::s_gtNodeCounts[GT_COUNT + 1] = {0};
#endif // COUNT_AST_OPERS
/* static */
void GenTree::InitNodeSize()
{
/* Set all sizes to 'small' first */
for (unsigned op = 0; op <= GT_COUNT; op++)
{
GenTree::s_gtNodeSizes[op] = TREE_NODE_SZ_SMALL;
}
// Now set all of the appropriate entries to 'large'
CLANG_FORMAT_COMMENT_ANCHOR;
// clang-format off
if (GlobalJitOptions::compFeatureHfa
#if defined(UNIX_AMD64_ABI)
|| true
#endif // defined(UNIX_AMD64_ABI)
)
{
// On ARM32, ARM64 and System V for struct returning
// there is code that does GT_ASG-tree.CopyObj call.
// CopyObj is a large node and the GT_ASG is small, which triggers an exception.
GenTree::s_gtNodeSizes[GT_ASG] = TREE_NODE_SZ_LARGE;
GenTree::s_gtNodeSizes[GT_RETURN] = TREE_NODE_SZ_LARGE;
}
GenTree::s_gtNodeSizes[GT_CALL] = TREE_NODE_SZ_LARGE;
GenTree::s_gtNodeSizes[GT_CAST] = TREE_NODE_SZ_LARGE;
GenTree::s_gtNodeSizes[GT_FTN_ADDR] = TREE_NODE_SZ_LARGE;
GenTree::s_gtNodeSizes[GT_BOX] = TREE_NODE_SZ_LARGE;
GenTree::s_gtNodeSizes[GT_INDEX] = TREE_NODE_SZ_LARGE;
GenTree::s_gtNodeSizes[GT_INDEX_ADDR] = TREE_NODE_SZ_LARGE;
GenTree::s_gtNodeSizes[GT_BOUNDS_CHECK] = TREE_NODE_SZ_SMALL;
GenTree::s_gtNodeSizes[GT_ARR_ELEM] = TREE_NODE_SZ_LARGE;
GenTree::s_gtNodeSizes[GT_ARR_INDEX] = TREE_NODE_SZ_LARGE;
GenTree::s_gtNodeSizes[GT_ARR_OFFSET] = TREE_NODE_SZ_LARGE;
GenTree::s_gtNodeSizes[GT_RET_EXPR] = TREE_NODE_SZ_LARGE;
GenTree::s_gtNodeSizes[GT_FIELD] = TREE_NODE_SZ_LARGE;
GenTree::s_gtNodeSizes[GT_CMPXCHG] = TREE_NODE_SZ_LARGE;
GenTree::s_gtNodeSizes[GT_QMARK] = TREE_NODE_SZ_LARGE;
GenTree::s_gtNodeSizes[GT_STORE_DYN_BLK] = TREE_NODE_SZ_LARGE;
GenTree::s_gtNodeSizes[GT_INTRINSIC] = TREE_NODE_SZ_LARGE;
GenTree::s_gtNodeSizes[GT_ALLOCOBJ] = TREE_NODE_SZ_LARGE;
#if USE_HELPERS_FOR_INT_DIV
GenTree::s_gtNodeSizes[GT_DIV] = TREE_NODE_SZ_LARGE;
GenTree::s_gtNodeSizes[GT_UDIV] = TREE_NODE_SZ_LARGE;
GenTree::s_gtNodeSizes[GT_MOD] = TREE_NODE_SZ_LARGE;
GenTree::s_gtNodeSizes[GT_UMOD] = TREE_NODE_SZ_LARGE;
#endif
#ifdef FEATURE_PUT_STRUCT_ARG_STK
// TODO-Throughput: This should not need to be a large node. The object info should be
// obtained from the child node.
GenTree::s_gtNodeSizes[GT_PUTARG_STK] = TREE_NODE_SZ_LARGE;
#if FEATURE_ARG_SPLIT
GenTree::s_gtNodeSizes[GT_PUTARG_SPLIT] = TREE_NODE_SZ_LARGE;
#endif // FEATURE_ARG_SPLIT
#endif // FEATURE_PUT_STRUCT_ARG_STK
assert(GenTree::s_gtNodeSizes[GT_RETURN] == GenTree::s_gtNodeSizes[GT_ASG]);
// This list of assertions should come to contain all GenTree subtypes that are declared
// "small".
assert(sizeof(GenTreeLclFld) <= GenTree::s_gtNodeSizes[GT_LCL_FLD]);
assert(sizeof(GenTreeLclVar) <= GenTree::s_gtNodeSizes[GT_LCL_VAR]);
static_assert_no_msg(sizeof(GenTree) <= TREE_NODE_SZ_SMALL);
static_assert_no_msg(sizeof(GenTreeUnOp) <= TREE_NODE_SZ_SMALL);
static_assert_no_msg(sizeof(GenTreeOp) <= TREE_NODE_SZ_SMALL);
static_assert_no_msg(sizeof(GenTreeVal) <= TREE_NODE_SZ_SMALL);
static_assert_no_msg(sizeof(GenTreeIntConCommon) <= TREE_NODE_SZ_SMALL);
static_assert_no_msg(sizeof(GenTreePhysReg) <= TREE_NODE_SZ_SMALL);
static_assert_no_msg(sizeof(GenTreeIntCon) <= TREE_NODE_SZ_SMALL);
static_assert_no_msg(sizeof(GenTreeLngCon) <= TREE_NODE_SZ_SMALL);
static_assert_no_msg(sizeof(GenTreeDblCon) <= TREE_NODE_SZ_SMALL);
static_assert_no_msg(sizeof(GenTreeStrCon) <= TREE_NODE_SZ_SMALL);
static_assert_no_msg(sizeof(GenTreeLclVarCommon) <= TREE_NODE_SZ_SMALL);
static_assert_no_msg(sizeof(GenTreeLclVar) <= TREE_NODE_SZ_SMALL);
static_assert_no_msg(sizeof(GenTreeLclFld) <= TREE_NODE_SZ_SMALL);
static_assert_no_msg(sizeof(GenTreeCC) <= TREE_NODE_SZ_SMALL);
static_assert_no_msg(sizeof(GenTreeCast) <= TREE_NODE_SZ_LARGE); // *** large node
static_assert_no_msg(sizeof(GenTreeBox) <= TREE_NODE_SZ_LARGE); // *** large node
static_assert_no_msg(sizeof(GenTreeField) <= TREE_NODE_SZ_LARGE); // *** large node
static_assert_no_msg(sizeof(GenTreeFieldList) <= TREE_NODE_SZ_SMALL);
static_assert_no_msg(sizeof(GenTreeColon) <= TREE_NODE_SZ_SMALL);
static_assert_no_msg(sizeof(GenTreeCall) <= TREE_NODE_SZ_LARGE); // *** large node
static_assert_no_msg(sizeof(GenTreeCmpXchg) <= TREE_NODE_SZ_LARGE); // *** large node
static_assert_no_msg(sizeof(GenTreeFptrVal) <= TREE_NODE_SZ_LARGE); // *** large node
static_assert_no_msg(sizeof(GenTreeQmark) <= TREE_NODE_SZ_LARGE); // *** large node
static_assert_no_msg(sizeof(GenTreeIntrinsic) <= TREE_NODE_SZ_LARGE); // *** large node
static_assert_no_msg(sizeof(GenTreeIndex) <= TREE_NODE_SZ_LARGE); // *** large node
static_assert_no_msg(sizeof(GenTreeIndexAddr) <= TREE_NODE_SZ_LARGE); // *** large node
static_assert_no_msg(sizeof(GenTreeArrLen) <= TREE_NODE_SZ_LARGE); // *** large node
static_assert_no_msg(sizeof(GenTreeBoundsChk) <= TREE_NODE_SZ_SMALL);
static_assert_no_msg(sizeof(GenTreeArrElem) <= TREE_NODE_SZ_LARGE); // *** large node
static_assert_no_msg(sizeof(GenTreeArrIndex) <= TREE_NODE_SZ_LARGE); // *** large node
static_assert_no_msg(sizeof(GenTreeArrOffs) <= TREE_NODE_SZ_LARGE); // *** large node
static_assert_no_msg(sizeof(GenTreeIndir) <= TREE_NODE_SZ_SMALL);
static_assert_no_msg(sizeof(GenTreeStoreInd) <= TREE_NODE_SZ_SMALL);
static_assert_no_msg(sizeof(GenTreeAddrMode) <= TREE_NODE_SZ_SMALL);
static_assert_no_msg(sizeof(GenTreeObj) <= TREE_NODE_SZ_SMALL);
static_assert_no_msg(sizeof(GenTreeBlk) <= TREE_NODE_SZ_SMALL);
static_assert_no_msg(sizeof(GenTreeStoreDynBlk) <= TREE_NODE_SZ_LARGE); // *** large node
static_assert_no_msg(sizeof(GenTreeRetExpr) <= TREE_NODE_SZ_LARGE); // *** large node
static_assert_no_msg(sizeof(GenTreeILOffset) <= TREE_NODE_SZ_SMALL);
static_assert_no_msg(sizeof(GenTreeClsVar) <= TREE_NODE_SZ_SMALL);
static_assert_no_msg(sizeof(GenTreeArgPlace) <= TREE_NODE_SZ_SMALL);
static_assert_no_msg(sizeof(GenTreePhiArg) <= TREE_NODE_SZ_SMALL);
static_assert_no_msg(sizeof(GenTreeAllocObj) <= TREE_NODE_SZ_LARGE); // *** large node
#ifndef FEATURE_PUT_STRUCT_ARG_STK
static_assert_no_msg(sizeof(GenTreePutArgStk) <= TREE_NODE_SZ_SMALL);
#else // FEATURE_PUT_STRUCT_ARG_STK
// TODO-Throughput: This should not need to be a large node. The object info should be
// obtained from the child node.
static_assert_no_msg(sizeof(GenTreePutArgStk) <= TREE_NODE_SZ_LARGE);
#if FEATURE_ARG_SPLIT
static_assert_no_msg(sizeof(GenTreePutArgSplit) <= TREE_NODE_SZ_LARGE);
#endif // FEATURE_ARG_SPLIT
#endif // FEATURE_PUT_STRUCT_ARG_STK
#ifdef FEATURE_SIMD
static_assert_no_msg(sizeof(GenTreeSIMD) <= TREE_NODE_SZ_SMALL);
#endif // FEATURE_SIMD
#ifdef FEATURE_HW_INTRINSICS
static_assert_no_msg(sizeof(GenTreeHWIntrinsic) <= TREE_NODE_SZ_SMALL);
#endif // FEATURE_HW_INTRINSICS
// clang-format on
}
size_t GenTree::GetNodeSize() const
{
return GenTree::s_gtNodeSizes[gtOper];
}
#ifdef DEBUG
bool GenTree::IsNodeProperlySized() const
{
size_t size;
if (gtDebugFlags & GTF_DEBUG_NODE_SMALL)
{
size = TREE_NODE_SZ_SMALL;
}
else
{
assert(gtDebugFlags & GTF_DEBUG_NODE_LARGE);
size = TREE_NODE_SZ_LARGE;
}
return GenTree::s_gtNodeSizes[gtOper] <= size;
}
#endif
//------------------------------------------------------------------------
// ReplaceWith: replace this with the src node. The source must be an isolated node
// and cannot be used after the replacement.
//
// Arguments:
// src - source tree, that replaces this.
// comp - the compiler instance to transfer annotations for arrays.
//
void GenTree::ReplaceWith(GenTree* src, Compiler* comp)
{
// The source may be big only if the target is also a big node
assert((gtDebugFlags & GTF_DEBUG_NODE_LARGE) || GenTree::s_gtNodeSizes[src->gtOper] == TREE_NODE_SZ_SMALL);
// The check is effective only if nodes have been already threaded.
assert((src->gtPrev == nullptr) && (src->gtNext == nullptr));
RecordOperBashing(OperGet(), src->OperGet()); // nop unless NODEBASH_STATS is enabled
GenTree* prev = gtPrev;
GenTree* next = gtNext;
// The VTable pointer is copied intentionally here
memcpy((void*)this, (void*)src, src->GetNodeSize());
this->gtPrev = prev;
this->gtNext = next;
#ifdef DEBUG
gtSeqNum = 0;
#endif
// Transfer any annotations.
if (src->OperGet() == GT_IND && src->gtFlags & GTF_IND_ARR_INDEX)
{
ArrayInfo arrInfo;
bool b = comp->GetArrayInfoMap()->Lookup(src, &arrInfo);
assert(b);
comp->GetArrayInfoMap()->Set(this, arrInfo);
}
DEBUG_DESTROY_NODE(src);
}
/*****************************************************************************
*
* When 'NODEBASH_STATS' is enabled in "jit.h" we record all instances of
* an existing GenTree node having its operator changed. This can be useful
* for two (related) things - to see what is being bashed (and what isn't),
* and to verify that the existing choices for what nodes are marked 'large'
* are reasonable (to minimize "wasted" space).
*
* And yes, the hash function / logic is simplistic, but it is conflict-free
* and transparent for what we need.
*/
#if NODEBASH_STATS
#define BASH_HASH_SIZE 211
inline unsigned hashme(genTreeOps op1, genTreeOps op2)
{
return ((op1 * 104729) ^ (op2 * 56569)) % BASH_HASH_SIZE;
}
struct BashHashDsc
{
unsigned __int32 bhFullHash; // the hash value (unique for all old->new pairs)
unsigned __int32 bhCount; // the same old->new bashings seen so far
unsigned __int8 bhOperOld; // original gtOper
unsigned __int8 bhOperNew; // new gtOper
};
static BashHashDsc BashHash[BASH_HASH_SIZE];
void GenTree::RecordOperBashing(genTreeOps operOld, genTreeOps operNew)
{
unsigned hash = hashme(operOld, operNew);
BashHashDsc* desc = BashHash + hash;
if (desc->bhFullHash != hash)
{
noway_assert(desc->bhCount == 0); // if this ever fires, need to fix the hash fn
desc->bhFullHash = hash;
}
desc->bhCount += 1;
desc->bhOperOld = operOld;
desc->bhOperNew = operNew;
}
void GenTree::ReportOperBashing(FILE* f)
{
unsigned total = 0;
fflush(f);
fprintf(f, "\n");
fprintf(f, "Bashed gtOper stats:\n");
fprintf(f, "\n");
fprintf(f, " Old operator New operator #bytes old->new Count\n");
fprintf(f, " ---------------------------------------------------------------\n");
for (unsigned h = 0; h < BASH_HASH_SIZE; h++)
{
unsigned count = BashHash[h].bhCount;
if (count == 0)
continue;
unsigned opOld = BashHash[h].bhOperOld;
unsigned opNew = BashHash[h].bhOperNew;
fprintf(f, " GT_%-13s -> GT_%-13s [size: %3u->%3u] %c %7u\n", OpName((genTreeOps)opOld),
OpName((genTreeOps)opNew), s_gtTrueSizes[opOld], s_gtTrueSizes[opNew],
(s_gtTrueSizes[opOld] < s_gtTrueSizes[opNew]) ? 'X' : ' ', count);
total += count;
}
fprintf(f, "\n");
fprintf(f, "Total bashings: %u\n", total);
fprintf(f, "\n");
fflush(f);
}
#endif // NODEBASH_STATS
/*****************************************************************************/
#if MEASURE_NODE_SIZE
void GenTree::DumpNodeSizes(FILE* fp)
{
// Dump the sizes of the various GenTree flavors
fprintf(fp, "Small tree node size = %zu bytes\n", TREE_NODE_SZ_SMALL);
fprintf(fp, "Large tree node size = %zu bytes\n", TREE_NODE_SZ_LARGE);
fprintf(fp, "\n");
// Verify that node sizes are set kosherly and dump sizes
for (unsigned op = GT_NONE + 1; op < GT_COUNT; op++)
{
unsigned needSize = s_gtTrueSizes[op];
unsigned nodeSize = s_gtNodeSizes[op];
const char* structNm = OpStructName((genTreeOps)op);
const char* operName = OpName((genTreeOps)op);
bool repeated = false;
// Have we seen this struct flavor before?
for (unsigned mop = GT_NONE + 1; mop < op; mop++)
{
if (strcmp(structNm, OpStructName((genTreeOps)mop)) == 0)
{
repeated = true;
break;
}
}
// Don't repeat the same GenTree flavor unless we have an error
if (!repeated || needSize > nodeSize)
{
unsigned sizeChar = '?';
if (nodeSize == TREE_NODE_SZ_SMALL)
sizeChar = 'S';
else if (nodeSize == TREE_NODE_SZ_LARGE)
sizeChar = 'L';
fprintf(fp, "GT_%-16s ... %-19s = %3u bytes (%c)", operName, structNm, needSize, sizeChar);
if (needSize > nodeSize)
{
fprintf(fp, " -- ERROR -- allocation is only %u bytes!", nodeSize);
}
else if (needSize <= TREE_NODE_SZ_SMALL && nodeSize == TREE_NODE_SZ_LARGE)
{
fprintf(fp, " ... could be small");
}
fprintf(fp, "\n");
}
}
}
#endif // MEASURE_NODE_SIZE
/*****************************************************************************
*
* Walk all basic blocks and call the given function pointer for all tree
* nodes contained therein.
*/
void Compiler::fgWalkAllTreesPre(fgWalkPreFn* visitor, void* pCallBackData)
{
for (BasicBlock* const block : Blocks())
{
for (Statement* const stmt : block->Statements())
{
fgWalkTreePre(stmt->GetRootNodePointer(), visitor, pCallBackData);
}
}
}
//-----------------------------------------------------------
// CopyReg: Copy the _gtRegNum/gtRegTag fields.
//
// Arguments:
// from - GenTree node from which to copy
//
// Return Value:
// None
void GenTree::CopyReg(GenTree* from)
{
_gtRegNum = from->_gtRegNum;
INDEBUG(gtRegTag = from->gtRegTag;)
// Also copy multi-reg state if this is a call node
if (IsCall())
{
assert(from->IsCall());
this->AsCall()->CopyOtherRegs(from->AsCall());
}
else if (IsCopyOrReload())
{
this->AsCopyOrReload()->CopyOtherRegs(from->AsCopyOrReload());
}
}
//------------------------------------------------------------------
// gtHasReg: Whether the node has been assigned a register by LSRA
//
// Arguments:
// comp - Compiler instance. Required for multi-reg lcl var; ignored otherwise.
//
// Return Value:
// Returns true if the node was assigned a register.
//
// In case of multi-reg call nodes, it is considered to have a reg if regs are allocated for ALL its
// return values.
// REVIEW: why is this ALL and the other cases are ANY? Explain.
//
// In case of GT_COPY or GT_RELOAD of a multi-reg call, GT_COPY/GT_RELOAD is considered to have a reg if it
// has a reg assigned to ANY of its positions.
//
// In case of multi-reg local vars, it is considered to have a reg if it has a reg assigned for ANY
// of its positions.
//
bool GenTree::gtHasReg(Compiler* comp) const
{
bool hasReg = false;
if (IsMultiRegCall())
{
const GenTreeCall* call = AsCall();
const unsigned regCount = call->GetReturnTypeDesc()->GetReturnRegCount();
// A Multi-reg call node is said to have regs, if it has
// reg assigned to each of its result registers.
for (unsigned i = 0; i < regCount; ++i)
{
hasReg = (call->GetRegNumByIdx(i) != REG_NA);
if (!hasReg)
{
break;
}
}
}
else if (IsCopyOrReloadOfMultiRegCall())
{
const GenTreeCopyOrReload* copyOrReload = AsCopyOrReload();
const GenTreeCall* call = copyOrReload->gtGetOp1()->AsCall();
const unsigned regCount = call->GetReturnTypeDesc()->GetReturnRegCount();
// A Multi-reg copy or reload node is said to have regs,
// if it has valid regs in any of the positions.
for (unsigned i = 0; i < regCount; ++i)
{
hasReg = (copyOrReload->GetRegNumByIdx(i) != REG_NA);
if (hasReg)
{
break;
}
}
}
else if (IsMultiRegLclVar())
{
assert(comp != nullptr);
const GenTreeLclVar* lclNode = AsLclVar();
const unsigned regCount = GetMultiRegCount(comp);
// A multi-reg local var is said to have regs,
// if it has valid regs in any of the positions.
for (unsigned i = 0; i < regCount; i++)
{
hasReg = (lclNode->GetRegNumByIdx(i) != REG_NA);
if (hasReg)
{
break;
}
}
}
else
{
hasReg = (GetRegNum() != REG_NA);
}
return hasReg;
}
//-----------------------------------------------------------------------------
// GetRegisterDstCount: Get the number of registers defined by the node.
//
// Arguments:
// None
//
// Return Value:
// The number of registers that this node defines.
//
// Notes:
// This should not be called on a contained node.
// This does not look at the actual register assignments, if any, and so
// is valid after Lowering.
//
int GenTree::GetRegisterDstCount(Compiler* compiler) const
{
assert(!isContained());
if (!IsMultiRegNode())
{
return (IsValue()) ? 1 : 0;
}
else if (IsMultiRegCall())
{
return AsCall()->GetReturnTypeDesc()->GetReturnRegCount();
}
else if (IsCopyOrReload())
{
return gtGetOp1()->GetRegisterDstCount(compiler);
}
#if FEATURE_ARG_SPLIT
else if (OperIsPutArgSplit())
{
return (const_cast<GenTree*>(this))->AsPutArgSplit()->gtNumRegs;
}
#endif
#if !defined(TARGET_64BIT)
else if (OperIsMultiRegOp())
{
// A MultiRegOp is a GT_MUL_LONG, GT_PUTARG_REG, or GT_BITCAST.
// For the latter two (ARM-only), they only have multiple registers if they produce a long value
// (GT_MUL_LONG always produces a long value).
CLANG_FORMAT_COMMENT_ANCHOR;
#ifdef TARGET_ARM
return (TypeGet() == TYP_LONG) ? 2 : 1;
#else
assert(OperIs(GT_MUL_LONG));
return 2;
#endif
}
#endif
#ifdef FEATURE_HW_INTRINSICS
else if (OperIsHWIntrinsic())
{
assert(TypeIs(TYP_STRUCT));
const GenTreeHWIntrinsic* intrinsic = AsHWIntrinsic();
const NamedIntrinsic intrinsicId = intrinsic->GetHWIntrinsicId();
assert(HWIntrinsicInfo::IsMultiReg(intrinsicId));
return HWIntrinsicInfo::GetMultiRegCount(intrinsicId);
}
#endif // FEATURE_HW_INTRINSICS
if (OperIsScalarLocal())
{
return AsLclVar()->GetFieldCount(compiler);
}
assert(!"Unexpected multi-reg node");
return 0;
}
//-----------------------------------------------------------------------------------
// IsMultiRegNode: whether this node returns its value in more than one register
//
// Arguments:
// None
//
// Return Value:
// Returns true if this GenTree is a multi-reg node.
//
// Notes:
// All targets that support multi-reg ops of any kind also support multi-reg return
// values for calls. Should that change with a future target, this method will need
// to change accordingly.
//
bool GenTree::IsMultiRegNode() const
{
#if FEATURE_MULTIREG_RET
if (IsMultiRegCall())
{
return true;
}
#if FEATURE_ARG_SPLIT
if (OperIsPutArgSplit())
{
return true;
}
#endif
#if !defined(TARGET_64BIT)
if (OperIsMultiRegOp())
{
return true;
}
#endif
if (OperIs(GT_COPY, GT_RELOAD))
{
return true;
}
#endif // FEATURE_MULTIREG_RET
#ifdef FEATURE_HW_INTRINSICS
if (OperIsHWIntrinsic())
{
return HWIntrinsicInfo::IsMultiReg(AsHWIntrinsic()->GetHWIntrinsicId());
}
#endif // FEATURE_HW_INTRINSICS
if (IsMultiRegLclVar())
{
return true;
}
return false;
}
//-----------------------------------------------------------------------------------
// GetMultiRegCount: Return the register count for a multi-reg node.
//
// Arguments:
// comp - Compiler instance. Required for MultiRegLclVar, unused otherwise.
//
// Return Value:
// Returns the number of registers defined by this node.
//
unsigned GenTree::GetMultiRegCount(Compiler* comp) const
{
#if FEATURE_MULTIREG_RET
if (IsMultiRegCall())
{
return AsCall()->GetReturnTypeDesc()->GetReturnRegCount();
}
#if FEATURE_ARG_SPLIT
if (OperIsPutArgSplit())
{
return AsPutArgSplit()->gtNumRegs;
}
#endif
#if !defined(TARGET_64BIT)
if (OperIsMultiRegOp())
{
return AsMultiRegOp()->GetRegCount();
}
#endif
if (OperIs(GT_COPY, GT_RELOAD))
{
return AsCopyOrReload()->GetRegCount();
}
#endif // FEATURE_MULTIREG_RET
#ifdef FEATURE_HW_INTRINSICS
if (OperIsHWIntrinsic())
{
return HWIntrinsicInfo::GetMultiRegCount(AsHWIntrinsic()->GetHWIntrinsicId());
}
#endif // FEATURE_HW_INTRINSICS
if (IsMultiRegLclVar())
{
assert(comp != nullptr);
return AsLclVar()->GetFieldCount(comp);
}
assert(!"GetMultiRegCount called with non-multireg node");
return 1;
}
//---------------------------------------------------------------
// gtGetRegMask: Get the reg mask of the node.
//
// Arguments:
// None
//
// Return Value:
// Reg Mask of GenTree node.
//
regMaskTP GenTree::gtGetRegMask() const
{
regMaskTP resultMask;
if (IsMultiRegCall())
{
resultMask = genRegMask(GetRegNum());
resultMask |= AsCall()->GetOtherRegMask();
}
else if (IsCopyOrReloadOfMultiRegCall())
{
// A multi-reg copy or reload, will have valid regs for only those
// positions that need to be copied or reloaded. Hence we need
// to consider only those registers for computing reg mask.
const GenTreeCopyOrReload* copyOrReload = AsCopyOrReload();
const GenTreeCall* call = copyOrReload->gtGetOp1()->AsCall();
const unsigned regCount = call->GetReturnTypeDesc()->GetReturnRegCount();
resultMask = RBM_NONE;
for (unsigned i = 0; i < regCount; ++i)
{
regNumber reg = copyOrReload->GetRegNumByIdx(i);
if (reg != REG_NA)
{
resultMask |= genRegMask(reg);
}
}
}
#if FEATURE_ARG_SPLIT
else if (compFeatureArgSplit() && OperIsPutArgSplit())
{
const GenTreePutArgSplit* splitArg = AsPutArgSplit();
const unsigned regCount = splitArg->gtNumRegs;
resultMask = RBM_NONE;
for (unsigned i = 0; i < regCount; ++i)
{
regNumber reg = splitArg->GetRegNumByIdx(i);
assert(reg != REG_NA);
resultMask |= genRegMask(reg);
}
}
#endif // FEATURE_ARG_SPLIT
else
{
resultMask = genRegMask(GetRegNum());
}
return resultMask;
}
void GenTreeFieldList::AddField(Compiler* compiler, GenTree* node, unsigned offset, var_types type)
{
m_uses.AddUse(new (compiler, CMK_ASTNode) Use(node, offset, type));
gtFlags |= node->gtFlags & GTF_ALL_EFFECT;
}
void GenTreeFieldList::AddFieldLIR(Compiler* compiler, GenTree* node, unsigned offset, var_types type)
{
m_uses.AddUse(new (compiler, CMK_ASTNode) Use(node, offset, type));
}
void GenTreeFieldList::InsertField(Compiler* compiler, Use* insertAfter, GenTree* node, unsigned offset, var_types type)
{
m_uses.InsertUse(insertAfter, new (compiler, CMK_ASTNode) Use(node, offset, type));
gtFlags |= node->gtFlags & GTF_ALL_EFFECT;
}
void GenTreeFieldList::InsertFieldLIR(
Compiler* compiler, Use* insertAfter, GenTree* node, unsigned offset, var_types type)
{
m_uses.InsertUse(insertAfter, new (compiler, CMK_ASTNode) Use(node, offset, type));
}
//---------------------------------------------------------------
// GetOtherRegMask: Get the reg mask of gtOtherRegs of call node
//
// Arguments:
// None
//
// Return Value:
// Reg mask of gtOtherRegs of call node.
//
regMaskTP GenTreeCall::GetOtherRegMask() const
{
regMaskTP resultMask = RBM_NONE;
#if FEATURE_MULTIREG_RET
for (unsigned i = 0; i < MAX_RET_REG_COUNT - 1; ++i)
{
if (gtOtherRegs[i] != REG_NA)
{
resultMask |= genRegMask((regNumber)gtOtherRegs[i]);
continue;
}
break;
}
#endif
return resultMask;
}
//-------------------------------------------------------------------------
// IsPure:
// Returns true if this call is pure. For now, this uses the same
// definition of "pure" that is used by HelperCallProperties: a
// pure call does not read or write any aliased (e.g. heap) memory or
// have other global side effects (e.g. class constructors, finalizers),
// but is allowed to throw an exception.
//
// NOTE: this call currently only returns true if the call target is a
// helper method that is known to be pure. No other analysis is
// performed.
//
// Arguments:
// compiler - the compiler context.
//
// Returns:
// True if the call is pure; false otherwise.
//
bool GenTreeCall::IsPure(Compiler* compiler) const
{
return (gtCallType == CT_HELPER) &&
compiler->s_helperCallProperties.IsPure(compiler->eeGetHelperNum(gtCallMethHnd));
}
//-------------------------------------------------------------------------
// HasSideEffects:
// Returns true if this call has any side effects. All non-helpers are considered to have side-effects. Only helpers
// that do not mutate the heap, do not run constructors, may not throw, and are either a) pure or b) non-finalizing
// allocation functions are considered side-effect-free.
//
// Arguments:
// compiler - the compiler instance
// ignoreExceptions - when `true`, ignores exception side effects
// ignoreCctors - when `true`, ignores class constructor side effects
//
// Return Value:
// true if this call has any side-effects; false otherwise.
bool GenTreeCall::HasSideEffects(Compiler* compiler, bool ignoreExceptions, bool ignoreCctors) const
{
// Generally all GT_CALL nodes are considered to have side-effects, but we may have extra information about helper
// calls that can prove them side-effect-free.
if (gtCallType != CT_HELPER)
{
return true;
}
CorInfoHelpFunc helper = compiler->eeGetHelperNum(gtCallMethHnd);
HelperCallProperties& helperProperties = compiler->s_helperCallProperties;
// We definitely care about the side effects if MutatesHeap is true
if (helperProperties.MutatesHeap(helper))
{
return true;
}
// Unless we have been instructed to ignore cctors (CSE, for example, ignores cctors), consider them side effects.
if (!ignoreCctors && helperProperties.MayRunCctor(helper))
{
return true;
}
// If we also care about exceptions then check if the helper can throw
if (!ignoreExceptions && !helperProperties.NoThrow(helper))
{
return true;
}
// If this is not a Pure helper call or an allocator (that will not need to run a finalizer)
// then this call has side effects.
return !helperProperties.IsPure(helper) &&
(!helperProperties.IsAllocator(helper) || ((gtCallMoreFlags & GTF_CALL_M_ALLOC_SIDE_EFFECTS) != 0));
}
//-------------------------------------------------------------------------
// HasNonStandardAddedArgs: Return true if the method has non-standard args added to the call
// argument list during argument morphing (fgMorphArgs), e.g., passed in R10 or R11 on AMD64.
// See also GetNonStandardAddedArgCount().
//
// Arguments:
// compiler - the compiler instance
//
// Return Value:
// true if there are any such args, false otherwise.
//
bool GenTreeCall::HasNonStandardAddedArgs(Compiler* compiler) const
{
return GetNonStandardAddedArgCount(compiler) != 0;
}
//-------------------------------------------------------------------------
// GetNonStandardAddedArgCount: Get the count of non-standard arguments that have been added
// during call argument morphing (fgMorphArgs). Do not count non-standard args that are already
// counted in the argument list prior to morphing.
//
// This function is used to help map the caller and callee arguments during tail call setup.
//
// Arguments:
// compiler - the compiler instance
//
// Return Value:
// The count of args, as described.
//
// Notes:
// It would be more general to have fgMorphArgs set a bit on the call node when such
// args are added to a call, and a bit on each such arg, and then have this code loop
// over the call args when the special call bit is set, counting the args with the special
// arg bit. This seems pretty heavyweight, though. Instead, this logic needs to be kept
// in sync with fgMorphArgs.
//
int GenTreeCall::GetNonStandardAddedArgCount(Compiler* compiler) const
{
if (IsUnmanaged() && !compiler->opts.ShouldUsePInvokeHelpers())
{
// R11 = PInvoke cookie param
return 1;
}
else if (IsVirtualStub())
{
// R11 = Virtual stub param
return 1;
}
else if ((gtCallType == CT_INDIRECT) && (gtCallCookie != nullptr))
{
// R10 = PInvoke target param
// R11 = PInvoke cookie param
return 2;
}
return 0;
}
//-------------------------------------------------------------------------
// TreatAsHasRetBufArg:
//
// Arguments:
// compiler, the compiler instance so that we can call eeGetHelperNum
//
// Return Value:
// Returns true if we treat the call as if it has a retBuf argument
// This method may actually have a retBuf argument
// or it could be a JIT helper that we are still transforming during
// the importer phase.
//
// Notes:
// On ARM64 marking the method with the GTF_CALL_M_RETBUFFARG flag
// will make HasRetBufArg() return true, but will also force the
// use of register x8 to pass the RetBuf argument.
//
// These two Jit Helpers that we handle here by returning true
// aren't actually defined to return a struct, so they don't expect
// their RetBuf to be passed in x8, instead they expect it in x0.
//
bool GenTreeCall::TreatAsHasRetBufArg(Compiler* compiler) const
{
if (HasRetBufArg())
{
return true;
}
else
{
// If we see a Jit helper call that returns a TYP_STRUCT we will
// transform it as if it has a Return Buffer Argument
//
if (IsHelperCall() && (gtReturnType == TYP_STRUCT))
{
// There are two possible helper calls that use this path:
// CORINFO_HELP_GETFIELDSTRUCT and CORINFO_HELP_UNBOX_NULLABLE
//
CorInfoHelpFunc helpFunc = compiler->eeGetHelperNum(gtCallMethHnd);
if (helpFunc == CORINFO_HELP_GETFIELDSTRUCT)
{
return true;
}
else if (helpFunc == CORINFO_HELP_UNBOX_NULLABLE)
{
return true;
}
else
{
assert(!"Unexpected JIT helper in TreatAsHasRetBufArg");
}
}
}
return false;
}
//-------------------------------------------------------------------------
// IsHelperCall: Determine if this GT_CALL node is a specific helper call.
//
// Arguments:
// compiler - the compiler instance so that we can call eeFindHelper
//
// Return Value:
// Returns true if this GT_CALL node is a call to the specified helper.
//
bool GenTreeCall::IsHelperCall(Compiler* compiler, unsigned helper) const
{
return IsHelperCall(compiler->eeFindHelper(helper));
}
//------------------------------------------------------------------------
// GenTreeCall::ReplaceCallOperand:
// Replaces a given operand to a call node and updates the call
// argument table if necessary.
//
// Arguments:
// useEdge - the use edge that points to the operand to be replaced.
// replacement - the replacement node.
//
void GenTreeCall::ReplaceCallOperand(GenTree** useEdge, GenTree* replacement)
{
assert(useEdge != nullptr);
assert(replacement != nullptr);
assert(TryGetUse(*useEdge, &useEdge));
GenTree* originalOperand = *useEdge;
*useEdge = replacement;
const bool isArgument =
(replacement != gtControlExpr) &&
((gtCallType != CT_INDIRECT) || ((replacement != gtCallCookie) && (replacement != gtCallAddr)));
if (isArgument)
{
if ((originalOperand->gtFlags & GTF_LATE_ARG) != 0)
{
replacement->gtFlags |= GTF_LATE_ARG;
}
else
{
assert((replacement->gtFlags & GTF_LATE_ARG) == 0);
fgArgTabEntry* fp = Compiler::gtArgEntryByNode(this, replacement);
assert(fp->GetNode() == replacement);
}
}
}
//-------------------------------------------------------------------------
// AreArgsComplete: Determine if this GT_CALL node's arguments have been processed.
//
// Return Value:
// Returns true if fgMorphArgs has processed the arguments.
//
bool GenTreeCall::AreArgsComplete() const
{
if (fgArgInfo == nullptr)
{
return false;
}
if (fgArgInfo->AreArgsComplete())
{
assert((gtCallLateArgs != nullptr) || !fgArgInfo->HasRegArgs());
return true;
}
#if defined(FEATURE_FASTTAILCALL)
// If we have FEATURE_FASTTAILCALL, 'fgCanFastTailCall()' can call 'fgInitArgInfo()', and in that
// scenario it is valid to have 'fgArgInfo' be non-null when 'fgMorphArgs()' first queries this,
// when it hasn't yet morphed the arguments.
#else
assert(gtCallArgs == nullptr);
#endif
return false;
}
//--------------------------------------------------------------------------
// Equals: Check if 2 CALL nodes are equal.
//
// Arguments:
// c1 - The first call node
// c2 - The second call node
//
// Return Value:
// true if the 2 CALL nodes have the same type and operands
//
bool GenTreeCall::Equals(GenTreeCall* c1, GenTreeCall* c2)
{
assert(c1->OperGet() == c2->OperGet());
if (c1->TypeGet() != c2->TypeGet())
{
return false;
}
if (c1->gtCallType != c2->gtCallType)
{
return false;
}
if (c1->gtCallType != CT_INDIRECT)
{
if (c1->gtCallMethHnd != c2->gtCallMethHnd)
{
return false;
}
#ifdef FEATURE_READYTORUN
if (c1->gtEntryPoint.addr != c2->gtEntryPoint.addr)
{
return false;
}
#endif
}
else
{
if (!Compare(c1->gtCallAddr, c2->gtCallAddr))
{
return false;
}
}
if ((c1->gtCallThisArg != nullptr) != (c2->gtCallThisArg != nullptr))
{
return false;
}
if ((c1->gtCallThisArg != nullptr) && !Compare(c1->gtCallThisArg->GetNode(), c2->gtCallThisArg->GetNode()))
{
return false;
}
GenTreeCall::UseIterator i1 = c1->Args().begin();
GenTreeCall::UseIterator end1 = c1->Args().end();
GenTreeCall::UseIterator i2 = c2->Args().begin();
GenTreeCall::UseIterator end2 = c2->Args().end();
for (; (i1 != end1) && (i2 != end2); ++i1, ++i2)
{
if (!Compare(i1->GetNode(), i2->GetNode()))
{
return false;
}
}
if ((i1 != end1) || (i2 != end2))
{
return false;
}
i1 = c1->LateArgs().begin();
end1 = c1->LateArgs().end();
i2 = c2->LateArgs().begin();
end2 = c2->LateArgs().end();
for (; (i1 != end1) && (i2 != end2); ++i1, ++i2)
{
if (!Compare(i1->GetNode(), i2->GetNode()))
{
return false;
}
}
if ((i1 != end1) || (i2 != end2))
{
return false;
}
if (!Compare(c1->gtControlExpr, c2->gtControlExpr))
{
return false;
}
return true;
}
//--------------------------------------------------------------------------
// ResetArgInfo: The argument info needs to be reset so it can be recomputed based on some change
// in conditions, such as changing the return type of a call due to giving up on doing a tailcall.
// If there is no fgArgInfo computed yet for this call, then there is nothing to reset.
//
void GenTreeCall::ResetArgInfo()
{
if (fgArgInfo == nullptr)
{
return;
}
// We would like to just set `fgArgInfo = nullptr`. But `fgInitArgInfo()` not
// only sets up fgArgInfo, it also adds non-standard args to the IR, and we need
// to remove that extra IR so it doesn't get added again.
//
unsigned argNum = 0;
if (gtCallThisArg != nullptr)
{
argNum++;
}
    Use** link = &gtCallArgs;
while ((*link) != nullptr)
{
const fgArgTabEntry* entry = fgArgInfo->GetArgEntry(argNum);
if (entry->isNonStandard() && entry->isNonStandardArgAddedLate())
{
JITDUMP("Removing non-standarg arg %s [%06u] to prepare for re-morphing call [%06u]\n",
getNonStandardArgKindName(entry->nonStandardArgKind), Compiler::dspTreeID((*link)->GetNode()),
gtTreeID);
*link = (*link)->GetNext();
}
else
{
link = &(*link)->NextRef();
}
argNum++;
}
fgArgInfo = nullptr;
}
#if !defined(FEATURE_PUT_STRUCT_ARG_STK)
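//------------------------------------------------------------------------
// GetStackByteSize: Get the number of stack bytes this PUTARG_STK node occupies.
//
// Return Value:
//    The size of the operand's actual (stack-normalized) type, in bytes.
//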
unsigned GenTreePutArgStk::GetStackByteSize() const
{
return genTypeSize(genActualType(gtOp1->gtType));
}
#endif // !defined(FEATURE_PUT_STRUCT_ARG_STK)
/*****************************************************************************
*
* Returns non-zero if the two trees are identical.
*/
bool GenTree::Compare(GenTree* op1, GenTree* op2, bool swapOK)
{
genTreeOps oper;
unsigned kind;
// printf("tree1:\n"); gtDispTree(op1);
// printf("tree2:\n"); gtDispTree(op2);
AGAIN:
if (op1 == nullptr)
{
return (op2 == nullptr);
}
if (op2 == nullptr)
{
return false;
}
if (op1 == op2)
{
return true;
}
oper = op1->OperGet();
/* The operators must be equal */
if (oper != op2->gtOper)
{
return false;
}
/* The types must be equal */
if (op1->gtType != op2->gtType)
{
return false;
}
/* Overflow must be equal */
if (op1->gtOverflowEx() != op2->gtOverflowEx())
{
return false;
}
/* Sensible flags must be equal */
if ((op1->gtFlags & (GTF_UNSIGNED)) != (op2->gtFlags & (GTF_UNSIGNED)))
{
return false;
}
/* Figure out what kind of nodes we're comparing */
kind = op1->OperKind();
/* Is this a constant node? */
if (op1->OperIsConst())
{
switch (oper)
{
case GT_CNS_INT:
if (op1->AsIntCon()->gtIconVal == op2->AsIntCon()->gtIconVal)
{
return true;
}
break;
case GT_CNS_STR:
if ((op1->AsStrCon()->gtSconCPX == op2->AsStrCon()->gtSconCPX) &&
(op1->AsStrCon()->gtScpHnd == op2->AsStrCon()->gtScpHnd))
{
return true;
}
break;
#if 0
// TODO-CQ: Enable this in the future
case GT_CNS_LNG:
if (op1->AsLngCon()->gtLconVal == op2->AsLngCon()->gtLconVal)
return true;
break;
case GT_CNS_DBL:
if (op1->AsDblCon()->gtDconVal == op2->AsDblCon()->gtDconVal)
return true;
break;
#endif
default:
break;
}
return false;
}
/* Is this a leaf node? */
if (kind & GTK_LEAF)
{
switch (oper)
{
case GT_LCL_VAR:
if (op1->AsLclVarCommon()->GetLclNum() != op2->AsLclVarCommon()->GetLclNum())
{
break;
}
return true;
case GT_LCL_FLD:
if ((op1->AsLclFld()->GetLclNum() != op2->AsLclFld()->GetLclNum()) ||
(op1->AsLclFld()->GetLclOffs() != op2->AsLclFld()->GetLclOffs()))
{
break;
}
return true;
case GT_CLS_VAR:
if (op1->AsClsVar()->gtClsVarHnd != op2->AsClsVar()->gtClsVarHnd)
{
break;
}
return true;
case GT_LABEL:
return true;
case GT_ARGPLACE:
if ((op1->gtType == TYP_STRUCT) &&
(op1->AsArgPlace()->gtArgPlaceClsHnd != op2->AsArgPlace()->gtArgPlaceClsHnd))
{
break;
}
return true;
default:
break;
}
return false;
}
/* Is it a 'simple' unary/binary operator? */
if (kind & GTK_UNOP)
{
if (IsExOp(kind))
{
            // ExOp operators extend unary operators with extra, non-GenTree* members. In many cases,
            // these should be included in the comparison.
switch (oper)
{
case GT_ARR_LENGTH:
if (op1->AsArrLen()->ArrLenOffset() != op2->AsArrLen()->ArrLenOffset())
{
return false;
}
break;
case GT_CAST:
if (op1->AsCast()->gtCastType != op2->AsCast()->gtCastType)
{
return false;
}
break;
case GT_BLK:
case GT_OBJ:
if (op1->AsBlk()->GetLayout() != op2->AsBlk()->GetLayout())
{
return false;
}
break;
case GT_FIELD:
if (op1->AsField()->gtFldHnd != op2->AsField()->gtFldHnd)
{
return false;
}
break;
// For the ones below no extra argument matters for comparison.
case GT_BOX:
case GT_RUNTIMELOOKUP:
break;
default:
assert(!"unexpected unary ExOp operator");
}
}
return Compare(op1->AsOp()->gtOp1, op2->AsOp()->gtOp1);
}
if (kind & GTK_BINOP)
{
if (IsExOp(kind))
{
            // ExOp operators extend binary operators with extra, non-GenTree* members. In many cases,
            // these should be included in the comparison.
switch (oper)
{
case GT_INTRINSIC:
if (op1->AsIntrinsic()->gtIntrinsicName != op2->AsIntrinsic()->gtIntrinsicName)
{
return false;
}
break;
case GT_LEA:
if (op1->AsAddrMode()->gtScale != op2->AsAddrMode()->gtScale)
{
return false;
}
if (op1->AsAddrMode()->Offset() != op2->AsAddrMode()->Offset())
{
return false;
}
break;
case GT_BOUNDS_CHECK:
if (op1->AsBoundsChk()->gtThrowKind != op2->AsBoundsChk()->gtThrowKind)
{
return false;
}
break;
case GT_INDEX:
if (op1->AsIndex()->gtIndElemSize != op2->AsIndex()->gtIndElemSize)
{
return false;
}
break;
case GT_INDEX_ADDR:
if (op1->AsIndexAddr()->gtElemSize != op2->AsIndexAddr()->gtElemSize)
{
return false;
}
break;
// For the ones below no extra argument matters for comparison.
case GT_QMARK:
break;
default:
assert(!"unexpected binary ExOp operator");
}
}
if (op1->AsOp()->gtOp2)
{
if (!Compare(op1->AsOp()->gtOp1, op2->AsOp()->gtOp1, swapOK))
{
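                // The first operands did not match. If swapping is allowed, the operator is
                // commutative, and none of the four operands have side effects, try matching
                // the operands in the opposite order before giving up.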
if (swapOK && OperIsCommutative(oper) &&
((op1->AsOp()->gtOp1->gtFlags | op1->AsOp()->gtOp2->gtFlags | op2->AsOp()->gtOp1->gtFlags |
op2->AsOp()->gtOp2->gtFlags) &
GTF_ALL_EFFECT) == 0)
{
if (Compare(op1->AsOp()->gtOp1, op2->AsOp()->gtOp2, swapOK))
{
op1 = op1->AsOp()->gtOp2;
op2 = op2->AsOp()->gtOp1;
goto AGAIN;
}
}
return false;
}
op1 = op1->AsOp()->gtOp2;
op2 = op2->AsOp()->gtOp2;
goto AGAIN;
}
else
{
op1 = op1->AsOp()->gtOp1;
op2 = op2->AsOp()->gtOp1;
if (!op1)
{
return (op2 == nullptr);
}
if (!op2)
{
return false;
}
goto AGAIN;
}
}
/* See what kind of a special operator we have here */
switch (oper)
{
case GT_CALL:
return GenTreeCall::Equals(op1->AsCall(), op2->AsCall());
#ifdef FEATURE_SIMD
case GT_SIMD:
return GenTreeSIMD::Equals(op1->AsSIMD(), op2->AsSIMD());
#endif // FEATURE_SIMD
#ifdef FEATURE_HW_INTRINSICS
case GT_HWINTRINSIC:
return GenTreeHWIntrinsic::Equals(op1->AsHWIntrinsic(), op2->AsHWIntrinsic());
#endif
case GT_ARR_ELEM:
if (op1->AsArrElem()->gtArrRank != op2->AsArrElem()->gtArrRank)
{
return false;
}
// NOTE: gtArrElemSize may need to be handled
unsigned dim;
for (dim = 0; dim < op1->AsArrElem()->gtArrRank; dim++)
{
if (!Compare(op1->AsArrElem()->gtArrInds[dim], op2->AsArrElem()->gtArrInds[dim]))
{
return false;
}
}
op1 = op1->AsArrElem()->gtArrObj;
op2 = op2->AsArrElem()->gtArrObj;
goto AGAIN;
case GT_ARR_OFFSET:
if (op1->AsArrOffs()->gtCurrDim != op2->AsArrOffs()->gtCurrDim ||
op1->AsArrOffs()->gtArrRank != op2->AsArrOffs()->gtArrRank)
{
return false;
}
return (Compare(op1->AsArrOffs()->gtOffset, op2->AsArrOffs()->gtOffset) &&
Compare(op1->AsArrOffs()->gtIndex, op2->AsArrOffs()->gtIndex) &&
Compare(op1->AsArrOffs()->gtArrObj, op2->AsArrOffs()->gtArrObj));
case GT_PHI:
return GenTreePhi::Equals(op1->AsPhi(), op2->AsPhi());
case GT_FIELD_LIST:
return GenTreeFieldList::Equals(op1->AsFieldList(), op2->AsFieldList());
case GT_CMPXCHG:
return Compare(op1->AsCmpXchg()->gtOpLocation, op2->AsCmpXchg()->gtOpLocation) &&
Compare(op1->AsCmpXchg()->gtOpValue, op2->AsCmpXchg()->gtOpValue) &&
Compare(op1->AsCmpXchg()->gtOpComparand, op2->AsCmpXchg()->gtOpComparand);
case GT_STORE_DYN_BLK:
return Compare(op1->AsStoreDynBlk()->Addr(), op2->AsStoreDynBlk()->Addr()) &&
Compare(op1->AsStoreDynBlk()->Data(), op2->AsStoreDynBlk()->Data()) &&
Compare(op1->AsStoreDynBlk()->gtDynamicSize, op2->AsStoreDynBlk()->gtDynamicSize);
default:
assert(!"unexpected operator");
}
return false;
}
//------------------------------------------------------------------------
// gtHasRef: Find out whether the given tree contains a local/field.
//
// Arguments:
// tree - tree to find the local in
// lclNum - the local's number, *or* the handle for the field
//
// Return Value:
// Whether "tree" has any LCL_VAR/LCL_FLD nodes that refer to the
// local, LHS or RHS, or FIELD nodes with the specified handle.
//
// Notes:
// Does not pay attention to local address nodes.
//
/* static */ bool Compiler::gtHasRef(GenTree* tree, ssize_t lclNum)
{
if (tree == nullptr)
{
return false;
}
if (tree->OperIsLeaf())
{
if (tree->OperIs(GT_LCL_VAR, GT_LCL_FLD) && (tree->AsLclVarCommon()->GetLclNum() == (unsigned)lclNum))
{
return true;
}
if (tree->OperIs(GT_RET_EXPR))
{
return gtHasRef(tree->AsRetExpr()->gtInlineCandidate, lclNum);
}
return false;
}
if (tree->OperIsUnary())
{
// Code in importation (see CEE_STFLD in impImportBlockCode), when
// spilling, can pass us "lclNum" that is actually a field handle...
if (tree->OperIs(GT_FIELD) && (lclNum == (ssize_t)tree->AsField()->gtFldHnd))
{
return true;
}
return gtHasRef(tree->AsUnOp()->gtGetOp1(), lclNum);
}
if (tree->OperIsBinary())
{
return gtHasRef(tree->AsOp()->gtGetOp1(), lclNum) || gtHasRef(tree->AsOp()->gtGetOp2(), lclNum);
}
bool result = false;
tree->VisitOperands([lclNum, &result](GenTree* operand) -> GenTree::VisitResult {
if (gtHasRef(operand, lclNum))
{
result = true;
return GenTree::VisitResult::Abort;
}
return GenTree::VisitResult::Continue;
});
return result;
}
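// AddrTakenDsc - walk data for gtHasLocalsWithAddrOpCB, recording whether an
// address-taken or address-exposed local has been seen.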
struct AddrTakenDsc
{
Compiler* comp;
bool hasAddrTakenLcl;
};
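//------------------------------------------------------------------------
// gtHasLocalsWithAddrOpCB: Tree-walk callback used by gtHasLocalsWithAddrOp.
//
// Sets "hasAddrTakenLcl" in the walk data and aborts the walk when it finds a
// GT_LCL_VAR whose variable has lvHasLdAddrOp or IsAddressExposed() set.
//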
/* static */
Compiler::fgWalkResult Compiler::gtHasLocalsWithAddrOpCB(GenTree** pTree, fgWalkData* data)
{
GenTree* tree = *pTree;
Compiler* comp = data->compiler;
if (tree->gtOper == GT_LCL_VAR)
{
const LclVarDsc* varDsc = comp->lvaGetDesc(tree->AsLclVarCommon());
if (varDsc->lvHasLdAddrOp || varDsc->IsAddressExposed())
{
((AddrTakenDsc*)data->pCallbackData)->hasAddrTakenLcl = true;
return WALK_ABORT;
}
}
return WALK_CONTINUE;
}
/*****************************************************************************
*
* Return true if this tree contains locals with lvHasLdAddrOp or IsAddressExposed()
* flag(s) set.
*/
bool Compiler::gtHasLocalsWithAddrOp(GenTree* tree)
{
AddrTakenDsc desc;
desc.comp = this;
desc.hasAddrTakenLcl = false;
fgWalkTreePre(&tree, gtHasLocalsWithAddrOpCB, &desc);
return desc.hasAddrTakenLcl;
}
#ifdef DEBUG
/*****************************************************************************
*
* Helper used to compute hash values for trees.
*/
inline unsigned genTreeHashAdd(unsigned old, unsigned add)
{
return (old + old / 2) ^ add;
}
inline unsigned genTreeHashAdd(unsigned old, void* add)
{
return genTreeHashAdd(old, (unsigned)(size_t)add);
}
/*****************************************************************************
*
* Given an arbitrary expression tree, compute a hash value for it.
*/
unsigned Compiler::gtHashValue(GenTree* tree)
{
genTreeOps oper;
unsigned kind;
unsigned hash = 0;
GenTree* temp;
AGAIN:
assert(tree);
/* Figure out what kind of a node we have */
oper = tree->OperGet();
kind = tree->OperKind();
/* Include the operator value in the hash */
hash = genTreeHashAdd(hash, oper);
/* Is this a leaf node? */
if (kind & GTK_LEAF)
{
size_t add;
switch (oper)
{
UINT64 bits;
case GT_LCL_VAR:
add = tree->AsLclVar()->GetLclNum();
break;
case GT_LCL_FLD:
hash = genTreeHashAdd(hash, tree->AsLclFld()->GetLclNum());
add = tree->AsLclFld()->GetLclOffs();
break;
case GT_CNS_INT:
add = tree->AsIntCon()->gtIconVal;
break;
case GT_CNS_LNG:
bits = (UINT64)tree->AsLngCon()->gtLconVal;
#ifdef HOST_64BIT
add = bits;
#else // 32-bit host
add = genTreeHashAdd(uhi32(bits), ulo32(bits));
#endif
break;
case GT_CNS_DBL:
bits = *(UINT64*)(&tree->AsDblCon()->gtDconVal);
#ifdef HOST_64BIT
add = bits;
#else // 32-bit host
add = genTreeHashAdd(uhi32(bits), ulo32(bits));
#endif
break;
case GT_CNS_STR:
add = tree->AsStrCon()->gtSconCPX;
break;
case GT_JMP:
add = tree->AsVal()->gtVal1;
break;
default:
add = 0;
break;
}
// clang-format off
// narrow 'add' into a 32-bit 'val'
unsigned val;
#ifdef HOST_64BIT
val = genTreeHashAdd(uhi32(add), ulo32(add));
#else // 32-bit host
val = add;
#endif
// clang-format on
hash = genTreeHashAdd(hash, val);
goto DONE;
}
/* Is it a 'simple' unary/binary operator? */
GenTree* op1;
if (kind & GTK_UNOP)
{
op1 = tree->AsOp()->gtOp1;
/* Special case: no sub-operand at all */
if (GenTree::IsExOp(kind))
{
// ExOp operators extend operators with extra, non-GenTree* members. In many cases,
// these should be included in the hash code.
switch (oper)
{
case GT_ARR_LENGTH:
hash += tree->AsArrLen()->ArrLenOffset();
break;
case GT_CAST:
hash ^= tree->AsCast()->gtCastType;
break;
case GT_INDEX:
hash += tree->AsIndex()->gtIndElemSize;
break;
case GT_INDEX_ADDR:
hash += tree->AsIndexAddr()->gtElemSize;
break;
case GT_ALLOCOBJ:
hash = genTreeHashAdd(hash, static_cast<unsigned>(
reinterpret_cast<uintptr_t>(tree->AsAllocObj()->gtAllocObjClsHnd)));
hash = genTreeHashAdd(hash, tree->AsAllocObj()->gtNewHelper);
break;
case GT_RUNTIMELOOKUP:
hash = genTreeHashAdd(hash, static_cast<unsigned>(
reinterpret_cast<uintptr_t>(tree->AsRuntimeLookup()->gtHnd)));
break;
case GT_BLK:
case GT_OBJ:
hash =
genTreeHashAdd(hash,
static_cast<unsigned>(reinterpret_cast<uintptr_t>(tree->AsBlk()->GetLayout())));
break;
case GT_FIELD:
hash = genTreeHashAdd(hash, tree->AsField()->gtFldHnd);
break;
// For the ones below no extra argument matters for comparison.
case GT_BOX:
break;
default:
assert(!"unexpected unary ExOp operator");
}
}
if (!op1)
{
goto DONE;
}
tree = op1;
goto AGAIN;
}
if (kind & GTK_BINOP)
{
if (GenTree::IsExOp(kind))
{
// ExOp operators extend operators with extra, non-GenTree* members. In many cases,
// these should be included in the hash code.
switch (oper)
{
case GT_INTRINSIC:
hash += tree->AsIntrinsic()->gtIntrinsicName;
break;
case GT_LEA:
hash += static_cast<unsigned>(tree->AsAddrMode()->Offset() << 3) + tree->AsAddrMode()->gtScale;
break;
case GT_BOUNDS_CHECK:
hash = genTreeHashAdd(hash, tree->AsBoundsChk()->gtThrowKind);
break;
case GT_STORE_BLK:
case GT_STORE_OBJ:
hash ^= PtrToUlong(tree->AsBlk()->GetLayout());
break;
// For the ones below no extra argument matters for comparison.
case GT_ARR_INDEX:
case GT_QMARK:
case GT_INDEX:
case GT_INDEX_ADDR:
break;
#ifdef FEATURE_SIMD
case GT_SIMD:
hash += tree->AsSIMD()->GetSIMDIntrinsicId();
hash += tree->AsSIMD()->GetSimdBaseType();
hash += tree->AsSIMD()->GetSimdSize();
break;
#endif // FEATURE_SIMD
#ifdef FEATURE_HW_INTRINSICS
case GT_HWINTRINSIC:
hash += tree->AsHWIntrinsic()->GetHWIntrinsicId();
hash += tree->AsHWIntrinsic()->GetSimdBaseType();
hash += tree->AsHWIntrinsic()->GetSimdSize();
hash += tree->AsHWIntrinsic()->GetAuxiliaryType();
hash += tree->AsHWIntrinsic()->GetOtherReg();
break;
#endif // FEATURE_HW_INTRINSICS
default:
assert(!"unexpected binary ExOp operator");
}
}
op1 = tree->AsOp()->gtOp1;
GenTree* op2 = tree->AsOp()->gtOp2;
/* Is there a second sub-operand? */
if (!op2)
{
/* Special case: no sub-operands at all */
if (!op1)
{
goto DONE;
}
/* This is a unary operator */
tree = op1;
goto AGAIN;
}
/* This is a binary operator */
unsigned hsh1 = gtHashValue(op1);
/* Add op1's hash to the running value and continue with op2 */
hash = genTreeHashAdd(hash, hsh1);
tree = op2;
goto AGAIN;
}
/* See what kind of a special operator we have here */
switch (tree->gtOper)
{
case GT_ARR_ELEM:
hash = genTreeHashAdd(hash, gtHashValue(tree->AsArrElem()->gtArrObj));
unsigned dim;
for (dim = 0; dim < tree->AsArrElem()->gtArrRank; dim++)
{
hash = genTreeHashAdd(hash, gtHashValue(tree->AsArrElem()->gtArrInds[dim]));
}
break;
case GT_ARR_OFFSET:
hash = genTreeHashAdd(hash, gtHashValue(tree->AsArrOffs()->gtOffset));
hash = genTreeHashAdd(hash, gtHashValue(tree->AsArrOffs()->gtIndex));
hash = genTreeHashAdd(hash, gtHashValue(tree->AsArrOffs()->gtArrObj));
break;
case GT_CALL:
if ((tree->AsCall()->gtCallThisArg != nullptr) && !tree->AsCall()->gtCallThisArg->GetNode()->OperIs(GT_NOP))
{
hash = genTreeHashAdd(hash, gtHashValue(tree->AsCall()->gtCallThisArg->GetNode()));
}
for (GenTreeCall::Use& use : tree->AsCall()->Args())
{
hash = genTreeHashAdd(hash, gtHashValue(use.GetNode()));
}
if (tree->AsCall()->gtCallType == CT_INDIRECT)
{
temp = tree->AsCall()->gtCallAddr;
assert(temp);
hash = genTreeHashAdd(hash, gtHashValue(temp));
}
else
{
hash = genTreeHashAdd(hash, tree->AsCall()->gtCallMethHnd);
}
for (GenTreeCall::Use& use : tree->AsCall()->LateArgs())
{
hash = genTreeHashAdd(hash, gtHashValue(use.GetNode()));
}
break;
#if defined(FEATURE_SIMD) || defined(FEATURE_HW_INTRINSICS)
#if defined(FEATURE_SIMD)
case GT_SIMD:
#endif
#if defined(FEATURE_HW_INTRINSICS)
case GT_HWINTRINSIC:
#endif
// TODO-List: rewrite with a general visitor / iterator?
for (GenTree* operand : tree->AsMultiOp()->Operands())
{
hash = genTreeHashAdd(hash, gtHashValue(operand));
}
break;
#endif // defined(FEATURE_SIMD) || defined(FEATURE_HW_INTRINSICS)
case GT_PHI:
for (GenTreePhi::Use& use : tree->AsPhi()->Uses())
{
hash = genTreeHashAdd(hash, gtHashValue(use.GetNode()));
}
break;
case GT_FIELD_LIST:
for (GenTreeFieldList::Use& use : tree->AsFieldList()->Uses())
{
hash = genTreeHashAdd(hash, gtHashValue(use.GetNode()));
}
break;
case GT_CMPXCHG:
hash = genTreeHashAdd(hash, gtHashValue(tree->AsCmpXchg()->gtOpLocation));
hash = genTreeHashAdd(hash, gtHashValue(tree->AsCmpXchg()->gtOpValue));
hash = genTreeHashAdd(hash, gtHashValue(tree->AsCmpXchg()->gtOpComparand));
break;
case GT_STORE_DYN_BLK:
hash = genTreeHashAdd(hash, gtHashValue(tree->AsStoreDynBlk()->Data()));
hash = genTreeHashAdd(hash, gtHashValue(tree->AsStoreDynBlk()->Addr()));
hash = genTreeHashAdd(hash, gtHashValue(tree->AsStoreDynBlk()->gtDynamicSize));
break;
default:
#ifdef DEBUG
gtDispTree(tree);
#endif
assert(!"unexpected operator");
break;
}
DONE:
return hash;
}
#endif // DEBUG
/*****************************************************************************
*
* Return a relational operator that is the reverse of the given one.
*/
/* static */
genTreeOps GenTree::ReverseRelop(genTreeOps relop)
{
static const genTreeOps reverseOps[] = {
GT_NE, // GT_EQ
GT_EQ, // GT_NE
GT_GE, // GT_LT
GT_GT, // GT_LE
GT_LT, // GT_GE
GT_LE, // GT_GT
GT_TEST_NE, // GT_TEST_EQ
GT_TEST_EQ, // GT_TEST_NE
};
assert(reverseOps[GT_EQ - GT_EQ] == GT_NE);
assert(reverseOps[GT_NE - GT_EQ] == GT_EQ);
assert(reverseOps[GT_LT - GT_EQ] == GT_GE);
assert(reverseOps[GT_LE - GT_EQ] == GT_GT);
assert(reverseOps[GT_GE - GT_EQ] == GT_LT);
assert(reverseOps[GT_GT - GT_EQ] == GT_LE);
assert(reverseOps[GT_TEST_EQ - GT_EQ] == GT_TEST_NE);
assert(reverseOps[GT_TEST_NE - GT_EQ] == GT_TEST_EQ);
assert(OperIsCompare(relop));
assert(relop >= GT_EQ && (unsigned)(relop - GT_EQ) < sizeof(reverseOps));
return reverseOps[relop - GT_EQ];
}
/*****************************************************************************
*
* Return a relational operator that will work for swapped operands.
*/
/* static */
genTreeOps GenTree::SwapRelop(genTreeOps relop)
{
static const genTreeOps swapOps[] = {
GT_EQ, // GT_EQ
GT_NE, // GT_NE
GT_GT, // GT_LT
GT_GE, // GT_LE
GT_LE, // GT_GE
GT_LT, // GT_GT
GT_TEST_EQ, // GT_TEST_EQ
GT_TEST_NE, // GT_TEST_NE
};
assert(swapOps[GT_EQ - GT_EQ] == GT_EQ);
assert(swapOps[GT_NE - GT_EQ] == GT_NE);
assert(swapOps[GT_LT - GT_EQ] == GT_GT);
assert(swapOps[GT_LE - GT_EQ] == GT_GE);
assert(swapOps[GT_GE - GT_EQ] == GT_LE);
assert(swapOps[GT_GT - GT_EQ] == GT_LT);
assert(swapOps[GT_TEST_EQ - GT_EQ] == GT_TEST_EQ);
assert(swapOps[GT_TEST_NE - GT_EQ] == GT_TEST_NE);
assert(OperIsCompare(relop));
assert(relop >= GT_EQ && (unsigned)(relop - GT_EQ) < sizeof(swapOps));
return swapOps[relop - GT_EQ];
}
/*****************************************************************************
*
* Reverse the meaning of the given test condition.
*/
GenTree* Compiler::gtReverseCond(GenTree* tree)
{
if (tree->OperIsCompare())
{
tree->SetOper(GenTree::ReverseRelop(tree->OperGet()));
// Flip the GTF_RELOP_NAN_UN bit
// a ord b === (a != NaN && b != NaN)
// a unord b === (a == NaN || b == NaN)
// => !(a ord b) === (a unord b)
if (varTypeIsFloating(tree->AsOp()->gtOp1->TypeGet()))
{
tree->gtFlags ^= GTF_RELOP_NAN_UN;
}
}
else if (tree->OperIs(GT_JCC, GT_SETCC))
{
GenTreeCC* cc = tree->AsCC();
cc->gtCondition = GenCondition::Reverse(cc->gtCondition);
}
else if (tree->OperIs(GT_JCMP))
{
// Flip the GTF_JCMP_EQ
//
// This causes switching
// cbz <=> cbnz
// tbz <=> tbnz
tree->gtFlags ^= GTF_JCMP_EQ;
}
else
{
tree = gtNewOperNode(GT_NOT, TYP_INT, tree);
}
return tree;
}
#if !defined(TARGET_64BIT) || defined(TARGET_ARM64)
//------------------------------------------------------------------------------
// IsValidLongMul: Check for long multiplication with 32-bit operands.
//
// Recognizes the following tree: MUL(CAST(long <- int), CAST(long <- int) or CONST),
// where CONST must be an integer constant that fits in 32 bits. Will try to detect
// cases when the multiplication cannot overflow and return "true" for them.
//
// This function does not change the state of the tree and is usable in LIR.
//
// Return Value:
// Whether this GT_MUL tree is a valid long multiplication candidate.
//
bool GenTreeOp::IsValidLongMul()
{
assert(OperIs(GT_MUL));
GenTree* op1 = gtGetOp1();
GenTree* op2 = gtGetOp2();
if (!TypeIs(TYP_LONG))
{
return false;
}
assert(op1->TypeIs(TYP_LONG));
assert(op2->TypeIs(TYP_LONG));
if (!(op1->OperIs(GT_CAST) && genActualTypeIsInt(op1->AsCast()->CastOp())))
{
return false;
}
if (!(op2->OperIs(GT_CAST) && genActualTypeIsInt(op2->AsCast()->CastOp())) &&
!(op2->IsIntegralConst() && FitsIn<int32_t>(op2->AsIntConCommon()->IntegralValue())))
{
return false;
}
if (op1->gtOverflow() || op2->gtOverflowEx())
{
return false;
}
if (gtOverflow())
{
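        // For an overflow-checked MUL, compute the largest-magnitude value each operand can
        // produce (based on the source type for casts, or the constant value itself) and
        // verify that multiplying those values cannot overflow.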
auto getMaxValue = [this](GenTree* op) -> int64_t {
if (op->OperIs(GT_CAST))
{
if (op->IsUnsigned())
{
switch (op->AsCast()->CastOp()->TypeGet())
{
case TYP_UBYTE:
return UINT8_MAX;
case TYP_USHORT:
return UINT16_MAX;
default:
return UINT32_MAX;
}
}
return IsUnsigned() ? static_cast<int64_t>(UINT64_MAX) : INT32_MIN;
}
return op->AsIntConCommon()->IntegralValue();
};
int64_t maxOp1 = getMaxValue(op1);
int64_t maxOp2 = getMaxValue(op2);
if (CheckedOps::MulOverflows(maxOp1, maxOp2, IsUnsigned()))
{
return false;
}
}
// Both operands must extend the same way.
bool op1ZeroExtends = op1->IsUnsigned();
bool op2ZeroExtends = op2->OperIs(GT_CAST) ? op2->IsUnsigned() : op2->AsIntConCommon()->IntegralValue() >= 0;
bool op2AnyExtensionIsSuitable = op2->IsIntegralConst() && op2ZeroExtends;
if ((op1ZeroExtends != op2ZeroExtends) && !op2AnyExtensionIsSuitable)
{
return false;
}
return true;
}
#if !defined(TARGET_64BIT) && defined(DEBUG)
//------------------------------------------------------------------------------
// DebugCheckLongMul : Checks that a GTF_MUL_64RSLT tree is a valid MUL_LONG.
//
// Notes:
// This function is defined for 32 bit targets only because we *must* maintain
// the MUL_LONG-compatible tree shape throughout the compilation from morph to
// decomposition, since we do not have (great) ability to create new calls in LIR.
//
// It is for this reason that we recognize MUL_LONGs early in morph, mark them with
// a flag and then pessimize various places (e. g. assertion propagation) to not look
// at them. In contrast, on ARM64 we recognize MUL_LONGs late, in lowering, and thus
// do not need this function.
//
void GenTreeOp::DebugCheckLongMul()
{
assert(OperIs(GT_MUL));
assert(Is64RsltMul());
assert(TypeIs(TYP_LONG));
assert(!gtOverflow());
GenTree* op1 = gtGetOp1();
GenTree* op2 = gtGetOp2();
assert(op1->TypeIs(TYP_LONG));
assert(op2->TypeIs(TYP_LONG));
// op1 has to be CAST(long <- int)
assert(op1->OperIs(GT_CAST) && genActualTypeIsInt(op1->AsCast()->CastOp()));
assert(!op1->gtOverflow());
// op2 has to be CAST(long <- int) or a suitably small constant.
assert((op2->OperIs(GT_CAST) && genActualTypeIsInt(op2->AsCast()->CastOp())) ||
(op2->IsIntegralConst() && FitsIn<int32_t>(op2->AsIntConCommon()->IntegralValue())));
assert(!op2->gtOverflowEx());
// Both operands must extend the same way.
bool op1ZeroExtends = op1->IsUnsigned();
bool op2ZeroExtends = op2->OperIs(GT_CAST) ? op2->IsUnsigned() : op2->AsIntConCommon()->IntegralValue() >= 0;
bool op2AnyExtensionIsSuitable = op2->IsIntegralConst() && op2ZeroExtends;
assert((op1ZeroExtends == op2ZeroExtends) || op2AnyExtensionIsSuitable);
// Do unsigned mul iff both operands are zero-extending.
assert(op1->IsUnsigned() == IsUnsigned());
}
#endif // !defined(TARGET_64BIT) && defined(DEBUG)
#endif // !defined(TARGET_64BIT) || defined(TARGET_ARM64)
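//------------------------------------------------------------------------
// gtSetCallArgsOrder: Set the evaluation order and costs for the trees in a call's argument list.
//
// Arguments:
//    args       - The argument list (either the early or the late argument list)
//    lateArgs   - true if 'args' is the late argument list
//    callCostEx - [in, out] The call's accumulated execution cost, updated with the arguments' costs
//    callCostSz - [in, out] The call's accumulated size cost, updated with the arguments' costs
//
// Return Value:
//    The maximum Sethi "complexity" (as computed by gtSetEvalOrder) among the argument trees.
//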
unsigned Compiler::gtSetCallArgsOrder(const GenTreeCall::UseList& args, bool lateArgs, int* callCostEx, int* callCostSz)
{
unsigned level = 0;
unsigned costEx = 0;
unsigned costSz = 0;
for (GenTreeCall::Use& use : args)
{
GenTree* argNode = use.GetNode();
unsigned argLevel = gtSetEvalOrder(argNode);
if (argLevel > level)
{
level = argLevel;
}
if (argNode->GetCostEx() != 0)
{
costEx += argNode->GetCostEx();
costEx += lateArgs ? 0 : IND_COST_EX;
}
if (argNode->GetCostSz() != 0)
{
costSz += argNode->GetCostSz();
#ifdef TARGET_XARCH
if (lateArgs) // push is smaller than mov to reg
#endif
{
costSz += 1;
}
}
}
*callCostEx += costEx;
*callCostSz += costSz;
return level;
}
#if defined(FEATURE_SIMD) || defined(FEATURE_HW_INTRINSICS)
//------------------------------------------------------------------------
// gtSetMultiOpOrder: Calculate the costs for a MultiOp.
//
// Currently this function just preserves the previous behavior.
// TODO-List-Cleanup: implement proper costing for these trees.
//
// Arguments:
// multiOp - The MultiOp tree in question
//
// Return Value:
// The Sethi "complexity" for this tree (the idealized number of
// registers needed to evaluate it).
//
unsigned Compiler::gtSetMultiOpOrder(GenTreeMultiOp* multiOp)
{
// These default costs preserve previous behavior.
// TODO-CQ: investigate opportunities for tuning them.
int costEx = 1;
int costSz = 1;
unsigned level = 0;
unsigned lvl2 = 0;
#if defined(FEATURE_HW_INTRINSICS)
if (multiOp->OperIs(GT_HWINTRINSIC))
{
GenTreeHWIntrinsic* hwTree = multiOp->AsHWIntrinsic();
#if defined(TARGET_XARCH)
if ((hwTree->GetOperandCount() == 1) && hwTree->OperIsMemoryLoadOrStore())
{
costEx = IND_COST_EX;
costSz = 2;
GenTree* const addrNode = hwTree->Op(1);
level = gtSetEvalOrder(addrNode);
GenTree* const addr = addrNode->gtEffectiveVal();
// See if we can form a complex addressing mode.
if (addr->OperIs(GT_ADD) && gtMarkAddrMode(addr, &costEx, &costSz, hwTree->TypeGet()))
{
// Nothing to do, costs have been set.
}
else
{
costEx += addr->GetCostEx();
costSz += addr->GetCostSz();
}
hwTree->SetCosts(costEx, costSz);
return level;
}
#endif
switch (hwTree->GetHWIntrinsicId())
{
#if defined(TARGET_XARCH)
case NI_Vector128_Create:
case NI_Vector256_Create:
#elif defined(TARGET_ARM64)
case NI_Vector64_Create:
case NI_Vector128_Create:
#endif
{
if ((hwTree->GetOperandCount() == 1) && hwTree->Op(1)->OperIsConst())
{
                    // Vector.Create(cns) is cheap, but not so cheap as to be (1,1)
costEx = IND_COST_EX;
costSz = 2;
level = gtSetEvalOrder(hwTree->Op(1));
hwTree->SetCosts(costEx, costSz);
return level;
}
break;
}
default:
break;
}
}
#endif // defined(FEATURE_HW_INTRINSICS)
// This code is here to preserve previous behavior.
switch (multiOp->GetOperandCount())
{
case 0:
// This is a constant HWIntrinsic, we already have correct costs.
break;
case 1:
// A "unary" case.
level = gtSetEvalOrder(multiOp->Op(1));
costEx += multiOp->Op(1)->GetCostEx();
costSz += multiOp->Op(1)->GetCostSz();
break;
case 2:
// A "binary" case.
// This way we have "level" be the complexity of the
// first tree to be evaluated, and "lvl2" - the second.
if (multiOp->IsReverseOp())
{
level = gtSetEvalOrder(multiOp->Op(2));
lvl2 = gtSetEvalOrder(multiOp->Op(1));
}
else
{
level = gtSetEvalOrder(multiOp->Op(1));
lvl2 = gtSetEvalOrder(multiOp->Op(2));
}
// We want the more complex tree to be evaluated first.
if (level < lvl2)
{
bool canSwap = multiOp->IsReverseOp() ? gtCanSwapOrder(multiOp->Op(2), multiOp->Op(1))
: gtCanSwapOrder(multiOp->Op(1), multiOp->Op(2));
if (canSwap)
{
if (multiOp->IsReverseOp())
{
multiOp->ClearReverseOp();
}
else
{
multiOp->SetReverseOp();
}
std::swap(level, lvl2);
}
}
if (level < 1)
{
level = lvl2;
}
else if (level == lvl2)
{
level += 1;
}
costEx += (multiOp->Op(1)->GetCostEx() + multiOp->Op(2)->GetCostEx());
costSz += (multiOp->Op(1)->GetCostSz() + multiOp->Op(2)->GetCostSz());
break;
default:
// The former "ArgList" case... we'll be emulating it here.
// The old implementation pushed the nodes on the list, in pre-order.
// Then it popped and costed them in "reverse order", so that's what
// we'll be doing here as well.
unsigned nxtlvl = 0;
for (size_t i = multiOp->GetOperandCount(); i >= 1; i--)
{
GenTree* op = multiOp->Op(i);
unsigned lvl = gtSetEvalOrder(op);
if (lvl < 1)
{
level = nxtlvl;
}
else if (lvl == nxtlvl)
{
level = lvl + 1;
}
else
{
level = lvl;
}
costEx += op->GetCostEx();
costSz += op->GetCostSz();
// Preserving previous behavior...
CLANG_FORMAT_COMMENT_ANCHOR;
#ifndef TARGET_XARCH
if (op->GetCostSz() != 0)
{
costSz += 1;
}
#endif
nxtlvl = level;
}
break;
}
multiOp->SetCosts(costEx, costSz);
return level;
}
#endif // defined(FEATURE_SIMD) || defined(FEATURE_HW_INTRINSICS)
//-----------------------------------------------------------------------------
// gtWalkOp: Traverse and mark an address expression
//
// Arguments:
// op1WB - An out parameter which is either the address expression, or one
// of its operands.
// op2WB - An out parameter which starts as either null or one of the operands
// of the address expression.
//    base      - The base address of the addressing mode, or null if 'constOnly' is true
// constOnly - True if we will only traverse into ADDs with constant op2.
//
// This routine is a helper routine for gtSetEvalOrder() and is used to identify the
// base and index nodes, which will be validated against those identified by
// genCreateAddrMode().
// It also marks the ADD nodes involved in the address expression with the
// GTF_ADDRMODE_NO_CSE flag which prevents them from being considered for CSE's.
//
// Its two output parameters are modified under the following conditions:
//
// It is called once with the original address expression as 'op1WB', and
// with 'constOnly' set to false. On this first invocation, *op1WB is always
// an ADD node, and it will consider the operands of the ADD even if its op2 is
// not a constant. However, when it encounters a non-constant or the base in the
// op2 position, it stops iterating. That operand is returned in the 'op2WB' out
// parameter, and will be considered on the third invocation of this method if
// it is an ADD.
//
// It is called the second time with the two operands of the original expression, in
// the original order, and the third time in reverse order. For these invocations
// 'constOnly' is true, so it will only traverse cascaded ADD nodes if they have a
// constant op2.
//
// The result, after three invocations, is that the values of the two out parameters
// correspond to the base and index in some fashion. This method doesn't attempt
// to determine or validate the scale or offset, if any.
//
// Assumptions (presumed to be ensured by genCreateAddrMode()):
// If an ADD has a constant operand, it is in the op2 position.
//
// Notes:
// This method, and its invocation sequence, are quite confusing, and since they
// were not originally well-documented, this specification is a possibly-imperfect
// reconstruction.
// The motivation for the handling of the NOP case is unclear.
// Note that 'op2WB' is only modified in the initial (!constOnly) case,
// or if a NOP is encountered in the op1 position.
//
void Compiler::gtWalkOp(GenTree** op1WB, GenTree** op2WB, GenTree* base, bool constOnly)
{
GenTree* op1 = *op1WB;
GenTree* op2 = *op2WB;
op1 = op1->gtEffectiveVal();
// Now we look for op1's with non-overflow GT_ADDs [of constants]
while ((op1->gtOper == GT_ADD) && (!op1->gtOverflow()) && (!constOnly || (op1->AsOp()->gtOp2->IsCnsIntOrI())))
{
// mark it with GTF_ADDRMODE_NO_CSE
op1->gtFlags |= GTF_ADDRMODE_NO_CSE;
if (!constOnly)
{
op2 = op1->AsOp()->gtOp2;
}
op1 = op1->AsOp()->gtOp1;
// If op1 is a GT_NOP then swap op1 and op2.
// (Why? Also, presumably op2 is not a GT_NOP in this case?)
if (op1->gtOper == GT_NOP)
{
GenTree* tmp;
tmp = op1;
op1 = op2;
op2 = tmp;
}
if (!constOnly && ((op2 == base) || (!op2->IsCnsIntOrI())))
{
break;
}
op1 = op1->gtEffectiveVal();
}
*op1WB = op1;
*op2WB = op2;
}
#ifdef DEBUG
/*****************************************************************************
* This is a workaround. It is to help implement an assert in gtSetEvalOrder() that the values
* gtWalkOp() leaves in op1 and op2 correspond with the values of adr, idx, mul, and cns
* that are returned by genCreateAddrMode(). It's essentially impossible to determine
* what gtWalkOp() *should* return for all possible trees. This simply loosens one assert
* to handle the following case:
indir int
const(h) int 4 field
+ byref
lclVar byref V00 this <-- op2
comma byref <-- adr (base)
indir byte
lclVar byref V00 this
+ byref
const int 2 <-- mul == 4
<< int <-- op1
lclVar int V01 arg1 <-- idx
* Here, we are planning to generate the address mode [edx+4*eax], where eax = idx and edx = the GT_COMMA expression.
* To check adr equivalence with op2, we need to walk down the GT_ADD tree just like gtWalkOp() does.
*/
GenTree* Compiler::gtWalkOpEffectiveVal(GenTree* op)
{
for (;;)
{
op = op->gtEffectiveVal();
if ((op->gtOper != GT_ADD) || op->gtOverflow() || !op->AsOp()->gtOp2->IsCnsIntOrI())
{
break;
}
op = op->AsOp()->gtOp1;
}
return op;
}
#endif // DEBUG
/*****************************************************************************
*
* Given a tree, set the GetCostEx and GetCostSz() fields which
* are used to measure the relative costs of the codegen of the tree
*
*/
void Compiler::gtPrepareCost(GenTree* tree)
{
gtSetEvalOrder(tree);
}
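//------------------------------------------------------------------------
// gtIsLikelyRegVar: Estimate whether a local variable tree will be enregistered.
//
// Arguments:
//    tree - The tree in question; only GT_LCL_VAR nodes can return true
//
// Return Value:
//    true if the local is expected to live in a register (not marked "do not
//    enregister", not an EH-live def, and referenced often enough); false otherwise.
//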
bool Compiler::gtIsLikelyRegVar(GenTree* tree)
{
if (tree->gtOper != GT_LCL_VAR)
{
return false;
}
const LclVarDsc* varDsc = lvaGetDesc(tree->AsLclVar());
if (varDsc->lvDoNotEnregister)
{
return false;
}
// If this is an EH-live var, return false if it is a def,
// as it will have to go to memory.
if (varDsc->lvLiveInOutOfHndlr && ((tree->gtFlags & GTF_VAR_DEF) != 0))
{
return false;
}
// Be pessimistic if ref counts are not yet set up.
//
// Perhaps we should be optimistic though.
// See notes in GitHub issue 18969.
if (!lvaLocalVarRefCounted())
{
return false;
}
if (varDsc->lvRefCntWtd() < (BB_UNITY_WEIGHT * 3))
{
return false;
}
#ifdef TARGET_X86
if (varTypeUsesFloatReg(tree->TypeGet()))
return false;
if (varTypeIsLong(tree->TypeGet()))
return false;
#endif
return true;
}
//------------------------------------------------------------------------
// gtCanSwapOrder: Returns true iff the secondNode can be swapped with firstNode.
//
// Arguments:
// firstNode - An operand of a tree that can have GTF_REVERSE_OPS set.
// secondNode - The other operand of the tree.
//
// Return Value:
// Returns a boolean indicating whether it is safe to reverse the execution
// order of the two trees, considering any exception, global effects, or
// ordering constraints.
//
bool Compiler::gtCanSwapOrder(GenTree* firstNode, GenTree* secondNode)
{
    // The relative order of global / side effects can't be swapped.
bool canSwap = true;
if (optValnumCSE_phase)
{
canSwap = optCSE_canSwap(firstNode, secondNode);
}
// We cannot swap in the presence of special side effects such as GT_CATCH_ARG.
if (canSwap && (firstNode->gtFlags & GTF_ORDER_SIDEEFF))
{
canSwap = false;
}
    // When strict side effect order is disabled we allow GTF_REVERSE_OPS to be set
    // when one or both sides contain a GTF_CALL or GTF_EXCEPT.
    // Currently only the C and C++ languages allow non-strict side effect order.
unsigned strictEffects = GTF_GLOB_EFFECT;
if (canSwap && (firstNode->gtFlags & strictEffects))
{
        // op1 has side effects that can't be reordered.
// Check for some special cases where we still may be able to swap.
if (secondNode->gtFlags & strictEffects)
{
            // op2 also has non-reorderable side effects - can't swap.
canSwap = false;
}
else
{
// No side effects in op2 - we can swap iff op1 has no way of modifying op2,
// i.e. through byref assignments or calls or op2 is a constant.
if (firstNode->gtFlags & strictEffects & GTF_PERSISTENT_SIDE_EFFECTS)
{
// We have to be conservative - can swap iff op2 is constant.
if (!secondNode->IsInvariant())
{
canSwap = false;
}
}
}
}
return canSwap;
}
//------------------------------------------------------------------------
// gtMarkAddrMode: Given an address expression, compute its costs and addressing mode opportunities,
// and mark addressing mode candidates as GTF_ADDRMODE_NO_CSE.
//
// Arguments:
//    addr    - The address expression
//    pCostEx - The execution cost of this address expression (in/out arg to be updated)
//    pCostSz - The size cost of this address expression (in/out arg to be updated)
//    type    - The type of the value being referenced by the parent of this address expression.
//
// Return Value:
// Returns true if it finds an addressing mode.
//
// Notes:
// TODO-Throughput - Consider actually instantiating these early, to avoid
// having to re-run the algorithm that looks for them (might also improve CQ).
//
bool Compiler::gtMarkAddrMode(GenTree* addr, int* pCostEx, int* pCostSz, var_types type)
{
// These are "out" parameters on the call to genCreateAddrMode():
bool rev; // This will be true if the operands will need to be reversed. At this point we
// don't care about this because we're not yet instantiating this addressing mode.
unsigned mul; // This is the index (scale) value for the addressing mode
ssize_t cns; // This is the constant offset
GenTree* base; // This is the base of the address.
GenTree* idx; // This is the index.
if (codeGen->genCreateAddrMode(addr, false /*fold*/, &rev, &base, &idx, &mul, &cns))
{
#ifdef TARGET_ARMARCH
// Multiplier should be a "natural-scale" power of two number which is equal to target's width.
//
// *(ulong*)(data + index * 8); - can be optimized
// *(ulong*)(data + index * 7); - can not be optimized
// *(int*)(data + index * 2); - can not be optimized
//
if ((mul > 0) && (genTypeSize(type) != mul))
{
return false;
}
#endif
// We can form a complex addressing mode, so mark each of the interior
// nodes with GTF_ADDRMODE_NO_CSE and calculate a more accurate cost.
addr->gtFlags |= GTF_ADDRMODE_NO_CSE;
#ifdef TARGET_XARCH
// addrmodeCount is the count of items that we used to form
// an addressing mode. The maximum value is 4 when we have
// all of these: { base, idx, cns, mul }
//
unsigned addrmodeCount = 0;
if (base)
{
*pCostEx += base->GetCostEx();
*pCostSz += base->GetCostSz();
addrmodeCount++;
}
if (idx)
{
*pCostEx += idx->GetCostEx();
*pCostSz += idx->GetCostSz();
addrmodeCount++;
}
if (cns)
{
if (((signed char)cns) == ((int)cns))
{
*pCostSz += 1;
}
else
{
*pCostSz += 4;
}
addrmodeCount++;
}
if (mul)
{
addrmodeCount++;
}
        // When we form a complex addressing mode we can reduce the costs
// associated with the interior GT_ADD and GT_LSH nodes:
//
// GT_ADD -- reduce this interior GT_ADD by (-3,-3)
// / \ --
// GT_ADD 'cns' -- reduce this interior GT_ADD by (-2,-2)
// / \ --
// 'base' GT_LSL -- reduce this interior GT_LSL by (-1,-1)
// / \ --
// 'idx' 'mul'
//
if (addrmodeCount > 1)
{
            // The number of interior GT_ADD and GT_LSH nodes will always be one less than addrmodeCount
//
addrmodeCount--;
GenTree* tmp = addr;
while (addrmodeCount > 0)
{
// decrement the gtCosts for the interior GT_ADD or GT_LSH node by the remaining
// addrmodeCount
tmp->SetCosts(tmp->GetCostEx() - addrmodeCount, tmp->GetCostSz() - addrmodeCount);
addrmodeCount--;
if (addrmodeCount > 0)
{
GenTree* tmpOp1 = tmp->AsOp()->gtOp1;
GenTree* tmpOp2 = tmp->gtGetOp2();
assert(tmpOp2 != nullptr);
if ((tmpOp1 != base) && (tmpOp1->OperGet() == GT_ADD))
{
tmp = tmpOp1;
}
else if (tmpOp2->OperGet() == GT_LSH)
{
tmp = tmpOp2;
}
else if (tmpOp1->OperGet() == GT_LSH)
{
tmp = tmpOp1;
}
else if (tmpOp2->OperGet() == GT_ADD)
{
tmp = tmpOp2;
}
else
{
// We can very rarely encounter a tree that has a GT_COMMA node
// that is difficult to walk, so we just early out without decrementing.
addrmodeCount = 0;
}
}
}
}
#elif defined TARGET_ARM
if (base)
{
*pCostEx += base->GetCostEx();
*pCostSz += base->GetCostSz();
            if ((base->gtOper == GT_LCL_VAR) && ((idx == nullptr) || (cns == 0)))
{
*pCostSz -= 1;
}
}
if (idx)
{
*pCostEx += idx->GetCostEx();
*pCostSz += idx->GetCostSz();
if (mul > 0)
{
*pCostSz += 2;
}
}
if (cns)
{
            if (cns >= 128) // small offsets fit into a 16-bit instruction
{
if (cns < 4096) // medium offsets require a 32-bit instruction
{
if (!varTypeIsFloating(type))
{
*pCostSz += 2;
}
}
else
{
*pCostEx += 2; // Very large offsets require movw/movt instructions
*pCostSz += 8;
}
}
}
#elif defined TARGET_ARM64
if (base)
{
*pCostEx += base->GetCostEx();
*pCostSz += base->GetCostSz();
}
if (idx)
{
*pCostEx += idx->GetCostEx();
*pCostSz += idx->GetCostSz();
}
if (cns != 0)
{
if (cns >= (4096 * genTypeSize(type)))
{
*pCostEx += 1;
*pCostSz += 4;
}
}
#else
#error "Unknown TARGET"
#endif
assert(addr->gtOper == GT_ADD);
assert(!addr->gtOverflow());
assert(mul != 1);
// If we have an addressing mode, we have one of:
// [base + cns]
// [ idx * mul ] // mul >= 2, else we would use base instead of idx
// [ idx * mul + cns] // mul >= 2, else we would use base instead of idx
// [base + idx * mul ] // mul can be 0, 2, 4, or 8
// [base + idx * mul + cns] // mul can be 0, 2, 4, or 8
// Note that mul == 0 is semantically equivalent to mul == 1.
// Note that cns can be zero.
CLANG_FORMAT_COMMENT_ANCHOR;
assert((base != nullptr) || (idx != nullptr && mul >= 2));
INDEBUG(GenTree* op1Save = addr);
// Walk 'addr' identifying non-overflow ADDs that will be part of the address mode.
// Note that we will be modifying 'op1' and 'op2' so that eventually they should
// map to the base and index.
GenTree* op1 = addr;
GenTree* op2 = nullptr;
gtWalkOp(&op1, &op2, base, false);
        // op1 and op2 are now descendants of the root GT_ADD of the addressing mode.
assert(op1 != op1Save);
assert(op2 != nullptr);
#if defined(TARGET_XARCH)
// Walk the operands again (the third operand is unused in this case).
// This time we will only consider adds with constant op2's, since
// we have already found either a non-ADD op1 or a non-constant op2.
// NOTE: we don't support ADD(op1, cns) addressing for ARM/ARM64 yet so
// this walk makes no sense there.
gtWalkOp(&op1, &op2, nullptr, true);
// For XARCH we will fold GT_ADDs in the op2 position into the addressing mode, so we call
// gtWalkOp on both operands of the original GT_ADD.
// This is not done for ARMARCH. Though the stated reason is that we don't try to create a
// scaled index, in fact we actually do create them (even base + index*scale + offset).
// At this point, 'op2' may itself be an ADD of a constant that should be folded
// into the addressing mode.
// Walk op2 looking for non-overflow GT_ADDs of constants.
gtWalkOp(&op2, &op1, nullptr, true);
#endif // defined(TARGET_XARCH)
// OK we are done walking the tree
// Now assert that op1 and op2 correspond with base and idx
// in one of the several acceptable ways.
// Note that sometimes op1/op2 is equal to idx/base
// and other times op1/op2 is a GT_COMMA node with
// an effective value that is idx/base
if (mul > 1)
{
if ((op1 != base) && (op1->gtOper == GT_LSH))
{
op1->gtFlags |= GTF_ADDRMODE_NO_CSE;
if (op1->AsOp()->gtOp1->gtOper == GT_MUL)
{
op1->AsOp()->gtOp1->gtFlags |= GTF_ADDRMODE_NO_CSE;
}
assert((base == nullptr) || (op2 == base) || (op2->gtEffectiveVal() == base->gtEffectiveVal()) ||
(gtWalkOpEffectiveVal(op2) == gtWalkOpEffectiveVal(base)));
}
else
{
assert(op2 != nullptr);
assert(op2->OperIs(GT_LSH, GT_MUL));
op2->gtFlags |= GTF_ADDRMODE_NO_CSE;
// We may have eliminated multiple shifts and multiplies in the addressing mode,
// so navigate down through them to get to "idx".
GenTree* op2op1 = op2->AsOp()->gtOp1;
while ((op2op1->gtOper == GT_LSH || op2op1->gtOper == GT_MUL) && op2op1 != idx)
{
op2op1->gtFlags |= GTF_ADDRMODE_NO_CSE;
op2op1 = op2op1->AsOp()->gtOp1;
}
assert(op1->gtEffectiveVal() == base);
assert(op2op1 == idx);
}
}
else
{
assert(mul == 0);
if ((op1 == idx) || (op1->gtEffectiveVal() == idx))
{
if (idx != nullptr)
{
if ((op1->gtOper == GT_MUL) || (op1->gtOper == GT_LSH))
{
GenTree* op1op1 = op1->AsOp()->gtOp1;
if ((op1op1->gtOper == GT_NOP) ||
(op1op1->gtOper == GT_MUL && op1op1->AsOp()->gtOp1->gtOper == GT_NOP))
{
op1->gtFlags |= GTF_ADDRMODE_NO_CSE;
if (op1op1->gtOper == GT_MUL)
{
op1op1->gtFlags |= GTF_ADDRMODE_NO_CSE;
}
}
}
}
assert((op2 == base) || (op2->gtEffectiveVal() == base));
}
else if ((op1 == base) || (op1->gtEffectiveVal() == base))
{
if (idx != nullptr)
{
assert(op2 != nullptr);
if (op2->OperIs(GT_MUL, GT_LSH))
{
GenTree* op2op1 = op2->AsOp()->gtOp1;
if ((op2op1->gtOper == GT_NOP) ||
(op2op1->gtOper == GT_MUL && op2op1->AsOp()->gtOp1->gtOper == GT_NOP))
{
op2->gtFlags |= GTF_ADDRMODE_NO_CSE;
if (op2op1->gtOper == GT_MUL)
{
op2op1->gtFlags |= GTF_ADDRMODE_NO_CSE;
}
}
}
assert((op2 == idx) || (op2->gtEffectiveVal() == idx));
}
}
else
{
// op1 isn't base or idx. Is this possible? Or should there be an assert?
}
}
return true;
} // end if (genCreateAddrMode(...))
return false;
}
/*****************************************************************************
*
* Given a tree, figure out the order in which its sub-operands should be
* evaluated. If the second operand of a binary operator is more expensive
* than the first operand, then try to swap the operand trees. Updates the
* GTF_REVERSE_OPS bit if necessary in this case.
*
* Returns the Sethi 'complexity' estimate for this tree (the higher
* the number, the higher is the tree's resources requirement).
*
* This function sets:
* 1. GetCostEx() to the execution complexity estimate
* 2. GetCostSz() to the code size estimate
* 3. Sometimes sets GTF_ADDRMODE_NO_CSE on nodes in the tree.
* 4. DEBUG-only: clears GTF_DEBUG_NODE_MORPHED.
*/
#ifdef _PREFAST_
#pragma warning(push)
#pragma warning(disable : 21000) // Suppress PREFast warning about overly large function
#endif
unsigned Compiler::gtSetEvalOrder(GenTree* tree)
{
assert(tree);
#ifdef DEBUG
/* Clear the GTF_DEBUG_NODE_MORPHED flag as well */
tree->gtDebugFlags &= ~GTF_DEBUG_NODE_MORPHED;
#endif
/* Is this a FP value? */
bool isflt = varTypeIsFloating(tree->TypeGet());
/* Figure out what kind of a node we have */
const genTreeOps oper = tree->OperGet();
const unsigned kind = tree->OperKind();
/* Assume no fixed registers will be trashed */
unsigned level;
int costEx;
int costSz;
#ifdef DEBUG
costEx = -1;
costSz = -1;
#endif
/* Is this a leaf node? */
if (kind & GTK_LEAF)
{
switch (oper)
{
#ifdef TARGET_ARM
case GT_CNS_STR:
// Uses movw/movt
costSz = 8;
costEx = 2;
goto COMMON_CNS;
case GT_CNS_LNG:
{
GenTreeIntConCommon* con = tree->AsIntConCommon();
INT64 lngVal = con->LngValue();
INT32 loVal = (INT32)(lngVal & 0xffffffff);
INT32 hiVal = (INT32)(lngVal >> 32);
if (lngVal == 0)
{
costSz = 1;
costEx = 1;
}
else
{
// Minimum of one instruction to setup hiVal,
// and one instruction to setup loVal
costSz = 4 + 4;
costEx = 1 + 1;
if (!codeGen->validImmForInstr(INS_mov, (target_ssize_t)hiVal) &&
!codeGen->validImmForInstr(INS_mvn, (target_ssize_t)hiVal))
{
// Needs extra instruction: movw/movt
costSz += 4;
costEx += 1;
}
if (!codeGen->validImmForInstr(INS_mov, (target_ssize_t)loVal) &&
!codeGen->validImmForInstr(INS_mvn, (target_ssize_t)loVal))
{
// Needs extra instruction: movw/movt
costSz += 4;
costEx += 1;
}
}
goto COMMON_CNS;
}
case GT_CNS_INT:
{
// If the constant is a handle then it will need to have a relocation
// applied to it.
// Any constant that requires a reloc must use the movw/movt sequence
//
GenTreeIntConCommon* con = tree->AsIntConCommon();
target_ssize_t conVal = (target_ssize_t)con->IconValue();
if (con->ImmedValNeedsReloc(this))
{
// Requires movw/movt
costSz = 8;
costEx = 2;
}
else if (codeGen->validImmForInstr(INS_add, conVal))
{
// Typically included with parent oper
costSz = 2;
costEx = 1;
}
else if (codeGen->validImmForInstr(INS_mov, conVal) || codeGen->validImmForInstr(INS_mvn, conVal))
{
// Uses mov or mvn
costSz = 4;
costEx = 1;
}
else
{
// Needs movw/movt
costSz = 8;
costEx = 2;
}
goto COMMON_CNS;
}
#elif defined TARGET_XARCH
case GT_CNS_STR:
#ifdef TARGET_AMD64
costSz = 10;
costEx = 2;
#else // TARGET_X86
costSz = 4;
costEx = 1;
#endif
goto COMMON_CNS;
case GT_CNS_LNG:
case GT_CNS_INT:
{
GenTreeIntConCommon* con = tree->AsIntConCommon();
ssize_t conVal = (oper == GT_CNS_LNG) ? (ssize_t)con->LngValue() : con->IconValue();
bool fitsInVal = true;
#ifdef TARGET_X86
if (oper == GT_CNS_LNG)
{
INT64 lngVal = con->LngValue();
conVal = (ssize_t)lngVal; // truncate to 32-bits
fitsInVal = ((INT64)conVal == lngVal);
}
#endif // TARGET_X86
// If the constant is a handle then it will need to have a relocation
// applied to it.
//
bool iconNeedsReloc = con->ImmedValNeedsReloc(this);
if (iconNeedsReloc)
{
costSz = 4;
costEx = 1;
}
else if (fitsInVal && GenTreeIntConCommon::FitsInI8(conVal))
{
costSz = 1;
costEx = 1;
}
#ifdef TARGET_AMD64
else if (!GenTreeIntConCommon::FitsInI32(conVal))
{
costSz = 10;
costEx = 2;
}
#endif // TARGET_AMD64
else
{
costSz = 4;
costEx = 1;
}
#ifdef TARGET_X86
if (oper == GT_CNS_LNG)
{
costSz += fitsInVal ? 1 : 4;
costEx += 1;
}
#endif // TARGET_X86
goto COMMON_CNS;
}
#elif defined(TARGET_ARM64)
case GT_CNS_STR:
case GT_CNS_LNG:
case GT_CNS_INT:
{
GenTreeIntConCommon* con = tree->AsIntConCommon();
bool iconNeedsReloc = con->ImmedValNeedsReloc(this);
INT64 imm = con->LngValue();
emitAttr size = EA_SIZE(emitActualTypeSize(tree));
if (iconNeedsReloc)
{
costSz = 8;
costEx = 2;
}
else if (emitter::emitIns_valid_imm_for_add(imm, size))
{
costSz = 2;
costEx = 1;
}
else if (emitter::emitIns_valid_imm_for_mov(imm, size))
{
costSz = 4;
costEx = 1;
}
else
{
// Arm64 allows any arbitrary 16-bit constant to be loaded into a register halfword
// There are three forms
// movk which loads into any halfword preserving the remaining halfwords
// movz which loads into any halfword zeroing the remaining halfwords
// movn which loads into any halfword zeroing the remaining halfwords then bitwise inverting
// the register
// In some cases it is preferable to use movn, because it has the side effect of filling the
// other halfwords
// with ones
// Determine whether movn or movz will require the fewest instructions to populate the immediate
bool preferMovz = false;
bool preferMovn = false;
int instructionCount = 4;
for (int i = (size == EA_8BYTE) ? 48 : 16; i >= 0; i -= 16)
{
if (!preferMovn && (uint16_t(imm >> i) == 0x0000))
{
preferMovz = true; // by using a movk to start we can save one instruction
instructionCount--;
}
else if (!preferMovz && (uint16_t(imm >> i) == 0xffff))
{
preferMovn = true; // by using a movn to start we can save one instruction
instructionCount--;
}
}
costEx = instructionCount;
costSz = 4 * instructionCount;
}
}
goto COMMON_CNS;
#else
case GT_CNS_STR:
case GT_CNS_LNG:
case GT_CNS_INT:
#error "Unknown TARGET"
#endif
COMMON_CNS:
/*
Note that some code below depends on constants always getting
moved to be the second operand of a binary operator. This is
easily accomplished by giving constants a level of 0, which
we do on the next line. If you ever decide to change this, be
aware that unless you make other arrangements for integer
constants to be moved, stuff will break.
*/
level = 0;
break;
case GT_CNS_DBL:
{
level = 0;
#if defined(TARGET_XARCH)
/* We use fldz and fld1 to load 0.0 and 1.0, but all other */
/* floating point constants are loaded using an indirection */
if ((*((__int64*)&(tree->AsDblCon()->gtDconVal)) == 0) ||
(*((__int64*)&(tree->AsDblCon()->gtDconVal)) == I64(0x3ff0000000000000)))
{
costEx = 1;
costSz = 1;
}
else
{
costEx = IND_COST_EX;
costSz = 4;
}
#elif defined(TARGET_ARM)
var_types targetType = tree->TypeGet();
if (targetType == TYP_FLOAT)
{
costEx = 1 + 2;
costSz = 2 + 4;
}
else
{
assert(targetType == TYP_DOUBLE);
costEx = 1 + 4;
costSz = 2 + 8;
}
#elif defined(TARGET_ARM64)
if ((*((__int64*)&(tree->AsDblCon()->gtDconVal)) == 0) ||
emitter::emitIns_valid_imm_for_fmov(tree->AsDblCon()->gtDconVal))
{
costEx = 1;
costSz = 1;
}
else
{
costEx = IND_COST_EX;
costSz = 4;
}
#else
#error "Unknown TARGET"
#endif
}
break;
case GT_LCL_VAR:
level = 1;
if (gtIsLikelyRegVar(tree))
{
costEx = 1;
costSz = 1;
/* Sign-extend and zero-extend are more expensive to load */
if (lvaTable[tree->AsLclVar()->GetLclNum()].lvNormalizeOnLoad())
{
costEx += 1;
costSz += 1;
}
}
else
{
costEx = IND_COST_EX;
costSz = 2;
/* Sign-extend and zero-extend are more expensive to load */
if (varTypeIsSmall(tree->TypeGet()))
{
costEx += 1;
costSz += 1;
}
}
#if defined(TARGET_AMD64)
// increase costSz for floating point locals
if (isflt)
{
costSz += 1;
if (!gtIsLikelyRegVar(tree))
{
costSz += 1;
}
}
#endif
break;
case GT_CLS_VAR:
#ifdef TARGET_ARM
// We generate movw/movt/ldr
level = 1;
costEx = 3 + IND_COST_EX; // 6
costSz = 4 + 4 + 2; // 10
break;
#endif
case GT_LCL_FLD:
level = 1;
costEx = IND_COST_EX;
costSz = 4;
if (varTypeIsSmall(tree->TypeGet()))
{
costEx += 1;
costSz += 1;
}
break;
case GT_LCL_FLD_ADDR:
case GT_LCL_VAR_ADDR:
level = 1;
costEx = 3;
costSz = 3;
break;
case GT_PHI_ARG:
case GT_ARGPLACE:
level = 0;
costEx = 0;
costSz = 0;
break;
default:
level = 1;
costEx = 1;
costSz = 1;
break;
}
goto DONE;
}
/* Is it a 'simple' unary/binary operator? */
if (kind & GTK_SMPOP)
{
int lvlb; // preference for op2
unsigned lvl2; // scratch variable
GenTree* op1 = tree->AsOp()->gtOp1;
GenTree* op2 = tree->gtGetOp2IfPresent();
costEx = 0;
costSz = 0;
if (tree->OperIsAddrMode())
{
if (op1 == nullptr)
{
op1 = op2;
op2 = nullptr;
}
}
/* Check for a nilary operator */
if (op1 == nullptr)
{
assert(op2 == nullptr);
level = 0;
goto DONE;
}
/* Is this a unary operator? */
if (op2 == nullptr)
{
/* Process the operand of the operator */
/* Most Unary ops have costEx of 1 */
costEx = 1;
costSz = 1;
level = gtSetEvalOrder(op1);
GenTreeIntrinsic* intrinsic;
/* Special handling for some operators */
switch (oper)
{
case GT_JTRUE:
costEx = 2;
costSz = 2;
break;
case GT_SWITCH:
costEx = 10;
costSz = 5;
break;
case GT_CAST:
#if defined(TARGET_ARM)
costEx = 1;
costSz = 1;
if (isflt || varTypeIsFloating(op1->TypeGet()))
{
costEx = 3;
costSz = 4;
}
#elif defined(TARGET_ARM64)
costEx = 1;
costSz = 2;
if (isflt || varTypeIsFloating(op1->TypeGet()))
{
costEx = 2;
costSz = 4;
}
#elif defined(TARGET_XARCH)
costEx = 1;
costSz = 2;
if (isflt || varTypeIsFloating(op1->TypeGet()))
{
/* cast involving floats always go through memory */
costEx = IND_COST_EX * 2;
costSz = 6;
}
#else
#error "Unknown TARGET"
#endif
/* Overflow casts are a lot more expensive */
if (tree->gtOverflow())
{
costEx += 6;
costSz += 6;
}
break;
case GT_NOP:
costEx = 0;
costSz = 0;
break;
case GT_INTRINSIC:
intrinsic = tree->AsIntrinsic();
// named intrinsic
assert(intrinsic->gtIntrinsicName != NI_Illegal);
// GT_INTRINSIC intrinsics Sin, Cos, Sqrt, Abs ... have higher costs.
// TODO: tune these costs target specific as some of these are
// target intrinsics and would cost less to generate code.
switch (intrinsic->gtIntrinsicName)
{
default:
assert(!"missing case for gtIntrinsicName");
costEx = 12;
costSz = 12;
break;
case NI_System_Math_Abs:
costEx = 5;
costSz = 15;
break;
case NI_System_Math_Acos:
case NI_System_Math_Acosh:
case NI_System_Math_Asin:
case NI_System_Math_Asinh:
case NI_System_Math_Atan:
case NI_System_Math_Atanh:
case NI_System_Math_Atan2:
case NI_System_Math_Cbrt:
case NI_System_Math_Ceiling:
case NI_System_Math_Cos:
case NI_System_Math_Cosh:
case NI_System_Math_Exp:
case NI_System_Math_Floor:
case NI_System_Math_FMod:
case NI_System_Math_FusedMultiplyAdd:
case NI_System_Math_ILogB:
case NI_System_Math_Log:
case NI_System_Math_Log2:
case NI_System_Math_Log10:
case NI_System_Math_Max:
case NI_System_Math_Min:
case NI_System_Math_Pow:
case NI_System_Math_Round:
case NI_System_Math_Sin:
case NI_System_Math_Sinh:
case NI_System_Math_Sqrt:
case NI_System_Math_Tan:
case NI_System_Math_Tanh:
case NI_System_Math_Truncate:
{
                            // We give intrinsics a large fixed execution cost because we'd like to CSE
                            // them, even if they are implemented by calls. This is different from modeling
                            // user calls, since we never CSE user calls. We don't do this for target intrinsics,
                            // however, as they typically represent single-instruction calls.
if (IsIntrinsicImplementedByUserCall(intrinsic->gtIntrinsicName))
{
costEx = 36;
costSz = 4;
}
else
{
costEx = 3;
costSz = 4;
}
break;
}
case NI_System_Object_GetType:
                            // We give intrinsics a large fixed execution cost because we'd like to CSE
                            // them, even if they are implemented by calls. This is different from modeling
                            // user calls, since we never CSE user calls.
costEx = 36;
costSz = 4;
break;
}
level++;
break;
case GT_NOT:
case GT_NEG:
// We need to ensure that -x is evaluated before x or else
// we get burned while adjusting genFPstkLevel in x*-x where
// the rhs x is the last use of the enregistered x.
//
// Even in the integer case we want to prefer to
// evaluate the side without the GT_NEG node, all other things
// being equal. Also a GT_NOT requires a scratch register
level++;
break;
case GT_ADDR:
costEx = 0;
costSz = 1;
// If we have a GT_ADDR of an GT_IND we can just copy the costs from indOp1
if (op1->OperGet() == GT_IND)
{
GenTree* indOp1 = op1->AsOp()->gtOp1;
costEx = indOp1->GetCostEx();
costSz = indOp1->GetCostSz();
}
break;
case GT_ARR_LENGTH:
level++;
/* Array length should cost the same as an indirection, which has a costEx of IND_COST_EX */
costEx = IND_COST_EX - 1;
costSz = 2;
break;
case GT_MKREFANY:
case GT_OBJ:
// We estimate the cost of a GT_OBJ or GT_MKREFANY to be two loads (GT_INDs)
costEx = 2 * IND_COST_EX;
costSz = 2 * 2;
break;
case GT_BOX:
// We estimate the cost of a GT_BOX to be two stores (GT_INDs)
costEx = 2 * IND_COST_EX;
costSz = 2 * 2;
break;
case GT_BLK:
case GT_IND:
/* An indirection should always have a non-zero level.
* Only constant leaf nodes have level 0.
*/
if (level == 0)
{
level = 1;
}
/* Indirections have a costEx of IND_COST_EX */
costEx = IND_COST_EX;
costSz = 2;
/* If we have to sign-extend or zero-extend, bump the cost */
if (varTypeIsSmall(tree->TypeGet()))
{
costEx += 1;
costSz += 1;
}
if (isflt)
{
if (tree->TypeGet() == TYP_DOUBLE)
{
costEx += 1;
}
#ifdef TARGET_ARM
costSz += 2;
#endif // TARGET_ARM
}
// Can we form an addressing mode with this indirection?
// TODO-CQ: Consider changing this to op1->gtEffectiveVal() to take into account
// addressing modes hidden under a comma node.
if (op1->gtOper == GT_ADD)
{
// See if we can form a complex addressing mode.
GenTree* addr = op1->gtEffectiveVal();
bool doAddrMode = true;
// See if we can form a complex addressing mode.
// Always use an addrMode for an array index indirection.
// TODO-1stClassStructs: Always do this, but first make sure it's
// done in Lowering as well.
if ((tree->gtFlags & GTF_IND_ARR_INDEX) == 0)
{
if (tree->TypeGet() == TYP_STRUCT)
{
doAddrMode = false;
}
else if (varTypeIsStruct(tree))
{
// This is a heuristic attempting to match prior behavior when indirections
// under a struct assignment would not be considered for addressing modes.
if (compCurStmt != nullptr)
{
GenTree* expr = compCurStmt->GetRootNode();
if ((expr->OperGet() == GT_ASG) &&
((expr->gtGetOp1() == tree) || (expr->gtGetOp2() == tree)))
{
doAddrMode = false;
}
}
}
}
#ifdef TARGET_ARM64
if (tree->gtFlags & GTF_IND_VOLATILE)
{
// For volatile stores/loads, when the address is contained we always emit `dmb`;
// if it's not, we emit one-way barriers, i.e. ldar/stlr.
doAddrMode = false;
}
#endif // TARGET_ARM64
if (doAddrMode && gtMarkAddrMode(addr, &costEx, &costSz, tree->TypeGet()))
{
goto DONE;
}
} // end if (op1->gtOper == GT_ADD)
else if (gtIsLikelyRegVar(op1))
{
/* Indirection of an enregistered LCL_VAR, don't increase costEx/costSz */
goto DONE;
}
#ifdef TARGET_XARCH
else if (op1->IsCnsIntOrI())
{
// Indirection of a CNS_INT: subtract 1 from costEx;
// this makes costEx 3 for x86 and 4 for amd64.
//
costEx += (op1->GetCostEx() - 1);
costSz += op1->GetCostSz();
goto DONE;
}
#endif
break;
default:
break;
}
costEx += op1->GetCostEx();
costSz += op1->GetCostSz();
goto DONE;
}
/* Binary operator - check for certain special cases */
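// 'lvlb' is a level bias for the second operand: the special cases below may
// make it negative, in which case it is folded back into op1's level (raising
// it) before op2 is processed, encouraging op1 to be evaluated first.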
lvlb = 0;
/* Default Binary ops have a cost of 1,1 */
costEx = 1;
costSz = 1;
#ifdef TARGET_ARM
if (isflt)
{
costSz += 2;
}
#endif
#ifndef TARGET_64BIT
if (varTypeIsLong(op1->TypeGet()))
{
/* Operations on longs are more expensive */
costEx += 3;
costSz += 3;
}
#endif
switch (oper)
{
case GT_MOD:
case GT_UMOD:
/* Modulo by a power of 2 is easy */
if (op2->IsCnsIntOrI())
{
size_t ival = op2->AsIntConCommon()->IconValue();
if (ival > 0 && ival == genFindLowestBit(ival))
{
break;
}
}
FALLTHROUGH;
case GT_DIV:
case GT_UDIV:
if (isflt)
{
/* fp division is very expensive to execute */
costEx = 36; // TYP_DOUBLE
costSz += 3;
}
else
{
/* integer division is also very expensive */
costEx = 20;
costSz += 2;
// Encourage the first operand to be evaluated (into EAX/EDX) first
lvlb -= 3;
}
break;
case GT_MUL:
if (isflt)
{
/* FP multiplication instructions are more expensive */
costEx += 4;
costSz += 3;
}
else
{
/* Integer multiplication instructions are more expensive */
costEx += 3;
costSz += 2;
if (tree->gtOverflow())
{
/* Overflow checks are more expensive */
costEx += 3;
costSz += 3;
}
#ifdef TARGET_X86
if ((tree->gtType == TYP_LONG) || tree->gtOverflow())
{
/* We use imulEAX for TYP_LONG and overflow multiplications */
// Encourage the first operand to be evaluated (into EAX/EDX) first
lvlb -= 4;
/* The 64-bit imul instruction costs more */
costEx += 4;
}
#endif // TARGET_X86
}
break;
case GT_ADD:
case GT_SUB:
if (isflt)
{
/* FP instructions are a bit more expensive */
costEx += 4;
costSz += 3;
break;
}
/* Overflow checks are more expensive */
if (tree->gtOverflow())
{
costEx += 3;
costSz += 3;
}
break;
case GT_BOUNDS_CHECK:
costEx = 4; // cmp reg,reg and jae throw (not taken)
costSz = 7; // jump to cold section
break;
case GT_COMMA:
/* Comma tosses the result of the left operand */
gtSetEvalOrder(op1);
level = gtSetEvalOrder(op2);
/* GT_COMMA cost is the sum of op1 and op2 costs */
costEx = (op1->GetCostEx() + op2->GetCostEx());
costSz = (op1->GetCostSz() + op2->GetCostSz());
goto DONE;
case GT_COLON:
level = gtSetEvalOrder(op1);
lvl2 = gtSetEvalOrder(op2);
if (level < lvl2)
{
level = lvl2;
}
else if (level == lvl2)
{
level += 1;
}
costEx = op1->GetCostEx() + op2->GetCostEx();
costSz = op1->GetCostSz() + op2->GetCostSz();
goto DONE;
case GT_INDEX_ADDR:
costEx = 6; // cmp reg,reg; jae throw; mov reg, [addrmode] (not taken)
costSz = 9; // jump to cold section
break;
case GT_ASG:
/* Assignments need a bit of special handling */
/* Process the target */
level = gtSetEvalOrder(op1);
if (gtIsLikelyRegVar(op1))
{
assert(lvlb == 0);
lvl2 = gtSetEvalOrder(op2);
/* Assignment to an enregistered LCL_VAR */
costEx = op2->GetCostEx();
costSz = max(3, op2->GetCostSz()); // 3 is an estimate for a reg-reg assignment
goto DONE_OP1_AFTER_COST;
}
goto DONE_OP1;
default:
break;
}
/* Process the sub-operands */
level = gtSetEvalOrder(op1);
if (lvlb < 0)
{
level -= lvlb; // lvlb is negative, so this increases level
lvlb = 0;
}
DONE_OP1:
assert(lvlb >= 0);
lvl2 = gtSetEvalOrder(op2) + lvlb;
costEx += (op1->GetCostEx() + op2->GetCostEx());
costSz += (op1->GetCostSz() + op2->GetCostSz());
DONE_OP1_AFTER_COST:
bool bReverseInAssignment = false;
if (oper == GT_ASG && (!optValnumCSE_phase || optCSE_canSwap(op1, op2)))
{
GenTree* op1Val = op1;
// Skip over the GT_IND/GT_ADDR tree (if one exists)
//
if ((op1->gtOper == GT_IND) && (op1->AsOp()->gtOp1->gtOper == GT_ADDR))
{
op1Val = op1->AsOp()->gtOp1->AsOp()->gtOp1;
}
switch (op1Val->gtOper)
{
case GT_IND:
case GT_BLK:
case GT_OBJ:
{
// In an ASG(IND(addr), ...), the "IND" is a pure syntactical element,
// the actual indirection will only be realized at the point of the ASG
// itself. As such, we can discard any side effects "induced" by it in
// this logic.
//
// Note that for local "addr"s, liveness depends on seeing the defs and
// uses in correct order, and so we MUST reverse the ASG in that case.
//
GenTree* op1Addr = op1->AsIndir()->Addr();
if (op1Addr->IsLocalAddrExpr() || op1Addr->IsInvariant())
{
bReverseInAssignment = true;
tree->gtFlags |= GTF_REVERSE_OPS;
break;
}
if (op1Addr->gtFlags & GTF_ALL_EFFECT)
{
break;
}
// In case op2 assigns to a local var that is used in op1Val, we have to evaluate op1Val first.
if (op2->gtFlags & GTF_ASG)
{
break;
}
// If op2 is simple then evaluate op1 first
if (op2->OperKind() & GTK_LEAF)
{
break;
}
}
// fall through and set GTF_REVERSE_OPS
FALLTHROUGH;
case GT_LCL_VAR:
case GT_LCL_FLD:
case GT_CLS_VAR:
// We evaluate op2 before op1
bReverseInAssignment = true;
tree->gtFlags |= GTF_REVERSE_OPS;
break;
default:
break;
}
}
else if (GenTree::OperIsCompare(oper))
{
/* Float compares remove both operands from the FP stack */
/* Also FP comparison uses EAX for flags */
if (varTypeIsFloating(op1->TypeGet()))
{
level++;
lvl2++;
}
if ((tree->gtFlags & GTF_RELOP_JMP_USED) == 0)
{
/* Using a setcc instruction is more expensive */
costEx += 3;
}
}
/* Check for other interesting cases */
switch (oper)
{
case GT_LSH:
case GT_RSH:
case GT_RSZ:
case GT_ROL:
case GT_ROR:
/* Variable sized shifts are more expensive and use REG_SHIFT */
if (!op2->IsCnsIntOrI())
{
costEx += 3;
#ifndef TARGET_64BIT
// Variable sized LONG shifts require the use of a helper call
//
if (tree->gtType == TYP_LONG)
{
level += 5;
lvl2 += 5;
costEx += 3 * IND_COST_EX;
costSz += 4;
}
#endif // !TARGET_64BIT
}
break;
case GT_INTRINSIC:
switch (tree->AsIntrinsic()->gtIntrinsicName)
{
case NI_System_Math_Atan2:
case NI_System_Math_Pow:
// These math intrinsics are actually implemented by user calls.
// Increase the Sethi 'complexity' by two to reflect the argument
// register requirement.
level += 2;
break;
case NI_System_Math_Max:
case NI_System_Math_Min:
level++;
break;
default:
assert(!"Unknown binary GT_INTRINSIC operator");
break;
}
break;
default:
break;
}
/* We need to evaluate constants later as many places in codegen
   can't handle op1 being a constant. This is normally naturally
   enforced as constants have the lowest level, 0. However,
   sometimes we end up with a tree like "cns1 < nop(cns2)". In
   such cases, both sides have a level of 0. So encourage constants
   to be evaluated last in such cases */
if ((level == 0) && (level == lvl2) && op1->OperIsConst() &&
(tree->OperIsCommutative() || tree->OperIsCompare()))
{
lvl2++;
}
/* We try to swap operands if the second one is more expensive */
bool tryToSwap;
GenTree* opA;
GenTree* opB;
if (tree->gtFlags & GTF_REVERSE_OPS)
{
opA = op2;
opB = op1;
}
else
{
opA = op1;
opB = op2;
}
if (fgOrder == FGOrderLinear)
{
// Don't swap anything if we're in linear order; we're really just interested in the costs.
tryToSwap = false;
}
else if (bReverseInAssignment)
{
// Assignments are special: we want the GTF_REVERSE_OPS flag,
// so, where applicable, it was already set above.
tryToSwap = false;
}
else if ((oper == GT_INTRINSIC) && IsIntrinsicImplementedByUserCall(tree->AsIntrinsic()->gtIntrinsicName))
{
// We do not swap operand execution order for intrinsics that are implemented by user calls
// because of trickiness around ensuring the execution order does not change during rationalization.
tryToSwap = false;
}
else if (oper == GT_BOUNDS_CHECK)
{
// Bounds check nodes used to not be binary, thus GTF_REVERSE_OPS was
// not enabled for them. This condition preserves that behavior.
// Additionally, CQ analysis shows that enabling GTF_REVERSE_OPS
// for these nodes leads to mixed results at best.
tryToSwap = false;
}
else
{
if (tree->gtFlags & GTF_REVERSE_OPS)
{
tryToSwap = (level > lvl2);
}
else
{
tryToSwap = (level < lvl2);
}
// Try to force extra swapping when in the stress mode:
if (compStressCompile(STRESS_REVERSE_FLAG, 60) && ((tree->gtFlags & GTF_REVERSE_OPS) == 0) &&
!op2->OperIsConst())
{
tryToSwap = true;
}
}
if (tryToSwap)
{
bool canSwap = gtCanSwapOrder(opA, opB);
if (canSwap)
{
/* Can we swap the order by commuting the operands? */
switch (oper)
{
case GT_EQ:
case GT_NE:
case GT_LT:
case GT_LE:
case GT_GE:
case GT_GT:
if (GenTree::SwapRelop(oper) != oper)
{
tree->SetOper(GenTree::SwapRelop(oper), GenTree::PRESERVE_VN);
}
FALLTHROUGH;
case GT_ADD:
case GT_MUL:
case GT_OR:
case GT_XOR:
case GT_AND:
/* Swap the operands */
tree->AsOp()->gtOp1 = op2;
tree->AsOp()->gtOp2 = op1;
break;
case GT_QMARK:
case GT_COLON:
case GT_MKREFANY:
break;
default:
/* Mark the operand's evaluation order to be swapped */
if (tree->gtFlags & GTF_REVERSE_OPS)
{
tree->gtFlags &= ~GTF_REVERSE_OPS;
}
else
{
tree->gtFlags |= GTF_REVERSE_OPS;
}
break;
}
}
}
/* Swap the level counts */
if (tree->gtFlags & GTF_REVERSE_OPS)
{
unsigned tmpl;
tmpl = level;
level = lvl2;
lvl2 = tmpl;
}
/* Compute the sethi number for this binary operator */
if (level < 1)
{
level = lvl2;
}
else if (level == lvl2)
{
level += 1;
}
goto DONE;
}
/* See what kind of a special operator we have here */
switch (oper)
{
unsigned lvl2; // Scratch variable
case GT_CALL:
assert(tree->gtFlags & GTF_CALL);
level = 0;
costEx = 5;
costSz = 2;
GenTreeCall* call;
call = tree->AsCall();
/* Evaluate the 'this' argument, if present */
if (tree->AsCall()->gtCallThisArg != nullptr)
{
GenTree* thisVal = tree->AsCall()->gtCallThisArg->GetNode();
lvl2 = gtSetEvalOrder(thisVal);
if (level < lvl2)
{
level = lvl2;
}
costEx += thisVal->GetCostEx();
costSz += thisVal->GetCostSz() + 1;
}
/* Evaluate the arguments, right to left */
if (call->gtCallArgs != nullptr)
{
const bool lateArgs = false;
lvl2 = gtSetCallArgsOrder(call->Args(), lateArgs, &costEx, &costSz);
if (level < lvl2)
{
level = lvl2;
}
}
/* Evaluate the temp register arguments list
* This is a "hidden" list and its only purpose is to
* extend the life of temps until we make the call */
if (call->gtCallLateArgs != nullptr)
{
const bool lateArgs = true;
lvl2 = gtSetCallArgsOrder(call->LateArgs(), lateArgs, &costEx, &costSz);
if (level < lvl2)
{
level = lvl2;
}
}
if (call->gtCallType == CT_INDIRECT)
{
// pinvoke-calli cookie is a constant, or constant indirection
assert(call->gtCallCookie == nullptr || call->gtCallCookie->gtOper == GT_CNS_INT ||
call->gtCallCookie->gtOper == GT_IND);
GenTree* indirect = call->gtCallAddr;
lvl2 = gtSetEvalOrder(indirect);
if (level < lvl2)
{
level = lvl2;
}
costEx += indirect->GetCostEx() + IND_COST_EX;
costSz += indirect->GetCostSz();
}
else
{
if (call->IsVirtual())
{
GenTree* controlExpr = call->gtControlExpr;
if (controlExpr != nullptr)
{
lvl2 = gtSetEvalOrder(controlExpr);
if (level < lvl2)
{
level = lvl2;
}
costEx += controlExpr->GetCostEx();
costSz += controlExpr->GetCostSz();
}
}
#ifdef TARGET_ARM
if (call->IsVirtualStub())
{
// We generate movw/movt/ldr
costEx += (1 + IND_COST_EX);
costSz += 8;
if (call->gtCallMoreFlags & GTF_CALL_M_VIRTSTUB_REL_INDIRECT)
{
// Must use R12 for the ldr target -- REG_JUMP_THUNK_PARAM
costSz += 2;
}
}
else if (!opts.jitFlags->IsSet(JitFlags::JIT_FLAG_PREJIT))
{
costEx += 2;
costSz += 6;
}
costSz += 2;
#endif
#ifdef TARGET_XARCH
costSz += 3;
#endif
}
level += 1;
/* Virtual calls are a bit more expensive */
if (call->IsVirtual())
{
costEx += 2 * IND_COST_EX;
costSz += 2;
}
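// The call itself is expensive: bump the level and add a fixed execution cost.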
level += 5;
costEx += 3 * IND_COST_EX;
break;
#if defined(FEATURE_SIMD) || defined(FEATURE_HW_INTRINSICS)
#if defined(FEATURE_SIMD)
case GT_SIMD:
#endif
#if defined(FEATURE_HW_INTRINSICS)
case GT_HWINTRINSIC:
#endif
return gtSetMultiOpOrder(tree->AsMultiOp());
#endif // defined(FEATURE_SIMD) || defined(FEATURE_HW_INTRINSICS)
case GT_ARR_ELEM:
{
GenTreeArrElem* arrElem = tree->AsArrElem();
level = gtSetEvalOrder(arrElem->gtArrObj);
costEx = arrElem->gtArrObj->GetCostEx();
costSz = arrElem->gtArrObj->GetCostSz();
for (unsigned dim = 0; dim < arrElem->gtArrRank; dim++)
{
lvl2 = gtSetEvalOrder(arrElem->gtArrInds[dim]);
if (level < lvl2)
{
level = lvl2;
}
costEx += arrElem->gtArrInds[dim]->GetCostEx();
costSz += arrElem->gtArrInds[dim]->GetCostSz();
}
level += arrElem->gtArrRank;
costEx += 2 + (arrElem->gtArrRank * (IND_COST_EX + 1));
costSz += 2 + (arrElem->gtArrRank * 2);
}
break;
case GT_ARR_OFFSET:
level = gtSetEvalOrder(tree->AsArrOffs()->gtOffset);
costEx = tree->AsArrOffs()->gtOffset->GetCostEx();
costSz = tree->AsArrOffs()->gtOffset->GetCostSz();
lvl2 = gtSetEvalOrder(tree->AsArrOffs()->gtIndex);
level = max(level, lvl2);
costEx += tree->AsArrOffs()->gtIndex->GetCostEx();
costSz += tree->AsArrOffs()->gtIndex->GetCostSz();
lvl2 = gtSetEvalOrder(tree->AsArrOffs()->gtArrObj);
level = max(level, lvl2);
costEx += tree->AsArrOffs()->gtArrObj->GetCostEx();
costSz += tree->AsArrOffs()->gtArrObj->GetCostSz();
break;
case GT_PHI:
for (GenTreePhi::Use& use : tree->AsPhi()->Uses())
{
lvl2 = gtSetEvalOrder(use.GetNode());
// PHI args should always have cost 0 and level 0
assert(lvl2 == 0);
assert(use.GetNode()->GetCostEx() == 0);
assert(use.GetNode()->GetCostSz() == 0);
}
// Give it a level of 2, just to be sure that it's greater than the LHS of
// the parent assignment and the PHI gets evaluated first in linear order.
// See also SsaBuilder::InsertPhi and SsaBuilder::AddPhiArg.
level = 2;
costEx = 0;
costSz = 0;
break;
case GT_FIELD_LIST:
level = 0;
costEx = 0;
costSz = 0;
for (GenTreeFieldList::Use& use : tree->AsFieldList()->Uses())
{
unsigned opLevel = gtSetEvalOrder(use.GetNode());
level = max(level, opLevel);
gtSetEvalOrder(use.GetNode());
costEx += use.GetNode()->GetCostEx();
costSz += use.GetNode()->GetCostSz();
}
break;
case GT_CMPXCHG:
level = gtSetEvalOrder(tree->AsCmpXchg()->gtOpLocation);
costSz = tree->AsCmpXchg()->gtOpLocation->GetCostSz();
lvl2 = gtSetEvalOrder(tree->AsCmpXchg()->gtOpValue);
if (level < lvl2)
{
level = lvl2;
}
costSz += tree->AsCmpXchg()->gtOpValue->GetCostSz();
lvl2 = gtSetEvalOrder(tree->AsCmpXchg()->gtOpComparand);
if (level < lvl2)
{
level = lvl2;
}
costSz += tree->AsCmpXchg()->gtOpComparand->GetCostSz();
costEx = MAX_COST; // Seriously, what could be more expensive than lock cmpxchg?
costSz += 5; // size of lock cmpxchg [reg+C], reg
break;
case GT_STORE_DYN_BLK:
level = gtSetEvalOrder(tree->AsStoreDynBlk()->Addr());
costEx = tree->AsStoreDynBlk()->Addr()->GetCostEx();
costSz = tree->AsStoreDynBlk()->Addr()->GetCostSz();
lvl2 = gtSetEvalOrder(tree->AsStoreDynBlk()->Data());
level = max(level, lvl2);
costEx += tree->AsStoreDynBlk()->Data()->GetCostEx();
costSz += tree->AsStoreDynBlk()->Data()->GetCostSz();
lvl2 = gtSetEvalOrder(tree->AsStoreDynBlk()->gtDynamicSize);
level = max(level, lvl2);
costEx += tree->AsStoreDynBlk()->gtDynamicSize->GetCostEx();
costSz += tree->AsStoreDynBlk()->gtDynamicSize->GetCostSz();
break;
default:
JITDUMP("unexpected operator in this tree:\n");
DISPTREE(tree);
NO_WAY("unexpected operator");
}
DONE:
// Some path through this function must have set the costs.
assert(costEx != -1);
assert(costSz != -1);
tree->SetCosts(costEx, costSz);
return level;
}
#ifdef _PREFAST_
#pragma warning(pop)
#endif
#ifdef DEBUG
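//------------------------------------------------------------------------
// OperSupportsReverseOpEvalOrder: Check whether the evaluation order of this
//    node's operands can be reversed via GTF_REVERSE_OPS.
//
// Return Value:
//    True if this is a binary node (or a two-operand multi-op node) with both
//    operands present, excluding opers for which reversal is never allowed.
//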
bool GenTree::OperSupportsReverseOpEvalOrder(Compiler* comp) const
{
if (OperIsBinary())
{
if ((AsOp()->gtGetOp1() == nullptr) || (AsOp()->gtGetOp2() == nullptr))
{
return false;
}
if (OperIs(GT_COMMA, GT_BOUNDS_CHECK))
{
return false;
}
if (OperIs(GT_INTRINSIC))
{
return !comp->IsIntrinsicImplementedByUserCall(AsIntrinsic()->gtIntrinsicName);
}
return true;
}
#if defined(FEATURE_SIMD) || defined(FEATURE_HW_INTRINSICS)
if (OperIsMultiOp())
{
return AsMultiOp()->GetOperandCount() == 2;
}
#endif // FEATURE_SIMD || FEATURE_HW_INTRINSICS
return false;
}
#endif // DEBUG
/*****************************************************************************
*
* If the given tree is an integer constant that can be used
* in a scaled index address mode as a multiplier (e.g. "[4*index]"), then return
* the scale factor: 2, 4, or 8. Otherwise, return 0. Note that we never return 1,
* to match the behavior of GetScaleIndexShf().
*/
unsigned GenTree::GetScaleIndexMul()
{
if (IsCnsIntOrI() && jitIsScaleIndexMul(AsIntConCommon()->IconValue()) && AsIntConCommon()->IconValue() != 1)
{
return (unsigned)AsIntConCommon()->IconValue();
}
return 0;
}
/*****************************************************************************
*
* If the given tree is the right-hand side of a left shift (that is,
* 'y' in the tree 'x' << 'y'), and it is an integer constant that can be used
* in a scaled index address mode as a multiplier (e.g. "[4*index]"), then return
* the scale factor: 2, 4, or 8. Otherwise, return 0.
*/
unsigned GenTree::GetScaleIndexShf()
{
if (IsCnsIntOrI() && jitIsScaleIndexShift(AsIntConCommon()->IconValue()))
{
return (unsigned)(1 << AsIntConCommon()->IconValue());
}
return 0;
}
/*****************************************************************************
*
* If the given tree is a scaled index (i.e. "op * 4" or "op << 2"), returns
* the multiplier: 2, 4, or 8; otherwise returns 0. Note that "1" is never
* returned.
*/
unsigned GenTree::GetScaledIndex()
{
// With !opts.OptEnabled(CLFLG_CONSTANTFOLD) we can have
// CNS_INT * CNS_INT
//
if (AsOp()->gtOp1->IsCnsIntOrI())
{
return 0;
}
switch (gtOper)
{
case GT_MUL:
return AsOp()->gtOp2->GetScaleIndexMul();
case GT_LSH:
return AsOp()->gtOp2->GetScaleIndexShf();
default:
assert(!"GenTree::GetScaledIndex() called with illegal gtOper");
break;
}
return 0;
}
//------------------------------------------------------------------------
// TryGetUse: Get the use edge for an operand of this tree.
//
// Arguments:
// operand - the node to find the use for
// pUse - [out] parameter for the use
//
// Return Value:
// Whether "operand" is a child of this node. If it is, "*pUse" is set,
// allowing for the replacement of "operand" with some other node.
//
bool GenTree::TryGetUse(GenTree* operand, GenTree*** pUse)
{
assert(operand != nullptr);
assert(pUse != nullptr);
switch (OperGet())
{
// Leaf nodes
case GT_LCL_VAR:
case GT_LCL_FLD:
case GT_LCL_VAR_ADDR:
case GT_LCL_FLD_ADDR:
case GT_CATCH_ARG:
case GT_LABEL:
case GT_FTN_ADDR:
case GT_RET_EXPR:
case GT_CNS_INT:
case GT_CNS_LNG:
case GT_CNS_DBL:
case GT_CNS_STR:
case GT_MEMORYBARRIER:
case GT_JMP:
case GT_JCC:
case GT_SETCC:
case GT_NO_OP:
case GT_START_NONGC:
case GT_START_PREEMPTGC:
case GT_PROF_HOOK:
#if !defined(FEATURE_EH_FUNCLETS)
case GT_END_LFIN:
#endif // !FEATURE_EH_FUNCLETS
case GT_PHI_ARG:
case GT_JMPTABLE:
case GT_CLS_VAR:
case GT_CLS_VAR_ADDR:
case GT_ARGPLACE:
case GT_PHYSREG:
case GT_EMITNOP:
case GT_PINVOKE_PROLOG:
case GT_PINVOKE_EPILOG:
case GT_IL_OFFSET:
return false;
// Standard unary operators
case GT_STORE_LCL_VAR:
case GT_STORE_LCL_FLD:
case GT_NOT:
case GT_NEG:
case GT_COPY:
case GT_RELOAD:
case GT_ARR_LENGTH:
case GT_CAST:
case GT_BITCAST:
case GT_CKFINITE:
case GT_LCLHEAP:
case GT_ADDR:
case GT_IND:
case GT_OBJ:
case GT_BLK:
case GT_BOX:
case GT_ALLOCOBJ:
case GT_RUNTIMELOOKUP:
case GT_INIT_VAL:
case GT_JTRUE:
case GT_SWITCH:
case GT_NULLCHECK:
case GT_PUTARG_REG:
case GT_PUTARG_STK:
case GT_PUTARG_TYPE:
case GT_RETURNTRAP:
case GT_NOP:
case GT_RETURN:
case GT_RETFILT:
case GT_BSWAP:
case GT_BSWAP16:
case GT_KEEPALIVE:
case GT_INC_SATURATE:
if (operand == this->AsUnOp()->gtOp1)
{
*pUse = &this->AsUnOp()->gtOp1;
return true;
}
return false;
// Variadic nodes
#if FEATURE_ARG_SPLIT
case GT_PUTARG_SPLIT:
if (this->AsUnOp()->gtOp1->gtOper == GT_FIELD_LIST)
{
return this->AsUnOp()->gtOp1->TryGetUse(operand, pUse);
}
if (operand == this->AsUnOp()->gtOp1)
{
*pUse = &this->AsUnOp()->gtOp1;
return true;
}
return false;
#endif // FEATURE_ARG_SPLIT
#if defined(FEATURE_SIMD) || defined(FEATURE_HW_INTRINSICS)
#if defined(FEATURE_SIMD)
case GT_SIMD:
#endif
#if defined(FEATURE_HW_INTRINSICS)
case GT_HWINTRINSIC:
#endif
for (GenTree** opUse : this->AsMultiOp()->UseEdges())
{
if (*opUse == operand)
{
*pUse = opUse;
return true;
}
}
return false;
#endif // defined(FEATURE_SIMD) || defined(FEATURE_HW_INTRINSICS)
// Special nodes
case GT_PHI:
for (GenTreePhi::Use& phiUse : AsPhi()->Uses())
{
if (phiUse.GetNode() == operand)
{
*pUse = &phiUse.NodeRef();
return true;
}
}
return false;
case GT_FIELD_LIST:
for (GenTreeFieldList::Use& fieldUse : AsFieldList()->Uses())
{
if (fieldUse.GetNode() == operand)
{
*pUse = &fieldUse.NodeRef();
return true;
}
}
return false;
case GT_CMPXCHG:
{
GenTreeCmpXchg* const cmpXchg = this->AsCmpXchg();
if (operand == cmpXchg->gtOpLocation)
{
*pUse = &cmpXchg->gtOpLocation;
return true;
}
if (operand == cmpXchg->gtOpValue)
{
*pUse = &cmpXchg->gtOpValue;
return true;
}
if (operand == cmpXchg->gtOpComparand)
{
*pUse = &cmpXchg->gtOpComparand;
return true;
}
return false;
}
case GT_ARR_ELEM:
{
GenTreeArrElem* const arrElem = this->AsArrElem();
if (operand == arrElem->gtArrObj)
{
*pUse = &arrElem->gtArrObj;
return true;
}
for (unsigned i = 0; i < arrElem->gtArrRank; i++)
{
if (operand == arrElem->gtArrInds[i])
{
*pUse = &arrElem->gtArrInds[i];
return true;
}
}
return false;
}
case GT_ARR_OFFSET:
{
GenTreeArrOffs* const arrOffs = this->AsArrOffs();
if (operand == arrOffs->gtOffset)
{
*pUse = &arrOffs->gtOffset;
return true;
}
if (operand == arrOffs->gtIndex)
{
*pUse = &arrOffs->gtIndex;
return true;
}
if (operand == arrOffs->gtArrObj)
{
*pUse = &arrOffs->gtArrObj;
return true;
}
return false;
}
case GT_STORE_DYN_BLK:
{
GenTreeStoreDynBlk* const dynBlock = this->AsStoreDynBlk();
if (operand == dynBlock->gtOp1)
{
*pUse = &dynBlock->gtOp1;
return true;
}
if (operand == dynBlock->gtOp2)
{
*pUse = &dynBlock->gtOp2;
return true;
}
if (operand == dynBlock->gtDynamicSize)
{
*pUse = &dynBlock->gtDynamicSize;
return true;
}
return false;
}
case GT_CALL:
{
GenTreeCall* const call = this->AsCall();
if ((call->gtCallThisArg != nullptr) && (operand == call->gtCallThisArg->GetNode()))
{
*pUse = &call->gtCallThisArg->NodeRef();
return true;
}
if (operand == call->gtControlExpr)
{
*pUse = &call->gtControlExpr;
return true;
}
if (call->gtCallType == CT_INDIRECT)
{
if (operand == call->gtCallCookie)
{
*pUse = &call->gtCallCookie;
return true;
}
if (operand == call->gtCallAddr)
{
*pUse = &call->gtCallAddr;
return true;
}
}
for (GenTreeCall::Use& argUse : call->Args())
{
if (argUse.GetNode() == operand)
{
*pUse = &argUse.NodeRef();
return true;
}
}
for (GenTreeCall::Use& argUse : call->LateArgs())
{
if (argUse.GetNode() == operand)
{
*pUse = &argUse.NodeRef();
return true;
}
}
return false;
}
// Binary nodes
default:
assert(this->OperIsBinary());
return TryGetUseBinOp(operand, pUse);
}
}
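//------------------------------------------------------------------------
// TryGetUseBinOp: TryGetUse helper for simple binary operators.
//
// Arguments:
//    operand - the node to find the use for
//    pUse    - [out] parameter for the use
//
// Return Value:
//    Whether "operand" is gtOp1 or gtOp2 of this node; if so, "*pUse" is set.
//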
bool GenTree::TryGetUseBinOp(GenTree* operand, GenTree*** pUse)
{
assert(operand != nullptr);
assert(pUse != nullptr);
assert(this->OperIsBinary());
GenTreeOp* const binOp = this->AsOp();
if (operand == binOp->gtOp1)
{
*pUse = &binOp->gtOp1;
return true;
}
if (operand == binOp->gtOp2)
{
*pUse = &binOp->gtOp2;
return true;
}
return false;
}
//------------------------------------------------------------------------
// GenTree::ReplaceOperand:
// Replace a given operand to this node with a new operand. If the
// current node is a call node, this will also update the call
// argument table if necessary.
//
// Arguments:
// useEdge - the use edge that points to the operand to be replaced.
// replacement - the replacement node.
//
void GenTree::ReplaceOperand(GenTree** useEdge, GenTree* replacement)
{
assert(useEdge != nullptr);
assert(replacement != nullptr);
assert(TryGetUse(*useEdge, &useEdge));
if (OperGet() == GT_CALL)
{
AsCall()->ReplaceCallOperand(useEdge, replacement);
}
else
{
*useEdge = replacement;
}
}
//------------------------------------------------------------------------
// gtGetParent: Get the parent of this node, and optionally capture the
// pointer to the child so that it can be modified.
//
// Arguments:
// pUse - A pointer to a GenTree** (yes, that's three
// levels, i.e. GenTree ***), which if non-null,
// will be set to point to the field in the parent
// that points to this node.
//
// Return value
// The parent of this node.
//
// Notes:
// This requires that the execution order must be defined (i.e. gtSetEvalOrder() has been called).
// To enable the child to be replaced, it accepts an argument, "pUse", that, if non-null,
// will be set to point to the child pointer in the parent that points to this node.
//
GenTree* GenTree::gtGetParent(GenTree*** pUse)
{
// Find the parent node; it must be after this node in the execution order.
GenTree* user;
GenTree** use = nullptr;
for (user = gtNext; user != nullptr; user = user->gtNext)
{
if (user->TryGetUse(this, &use))
{
break;
}
}
if (pUse != nullptr)
{
*pUse = use;
}
return user;
}
//-------------------------------------------------------------------------
// gtRetExprVal - walk back through GT_RET_EXPRs
//
// Arguments:
// pbbFlags - out-parameter that is set to the flags of the basic block
// containing the inlinee return value. The value is 0
// for unsuccessful inlines.
//
// Returns:
// tree representing return value from a successful inline,
// or original call for failed or yet to be determined inline.
//
// Notes:
// Multi-level inlines can form chains of GT_RET_EXPRs.
// This method walks back to the root of the chain.
//
GenTree* GenTree::gtRetExprVal(BasicBlockFlags* pbbFlags /* = nullptr */)
{
GenTree* retExprVal = this;
BasicBlockFlags bbFlags = BBF_EMPTY;
assert(!retExprVal->OperIs(GT_PUTARG_TYPE));
while (retExprVal->OperIs(GT_RET_EXPR))
{
const GenTreeRetExpr* retExpr = retExprVal->AsRetExpr();
bbFlags = retExpr->bbFlags;
retExprVal = retExpr->gtInlineCandidate;
}
if (pbbFlags != nullptr)
{
*pbbFlags = bbFlags;
}
return retExprVal;
}
//------------------------------------------------------------------------------
// OperRequiresAsgFlag : Check whether the operation requires GTF_ASG flag regardless
// of the children's flags.
//
bool GenTree::OperRequiresAsgFlag()
{
if (OperIs(GT_ASG, GT_STORE_DYN_BLK) ||
OperIs(GT_XADD, GT_XORR, GT_XAND, GT_XCHG, GT_LOCKADD, GT_CMPXCHG, GT_MEMORYBARRIER))
{
return true;
}
#ifdef FEATURE_HW_INTRINSICS
if (gtOper == GT_HWINTRINSIC)
{
GenTreeHWIntrinsic* hwIntrinsicNode = this->AsHWIntrinsic();
if (hwIntrinsicNode->OperIsMemoryStore())
{
// A MemoryStore operation is an assignment
return true;
}
}
#endif // FEATURE_HW_INTRINSICS
return false;
}
//------------------------------------------------------------------------------
// OperRequiresCallFlag : Check whether the operation requires GTF_CALL flag regardless
// of the children's flags.
//
bool GenTree::OperRequiresCallFlag(Compiler* comp)
{
switch (gtOper)
{
case GT_CALL:
return true;
case GT_KEEPALIVE:
return true;
case GT_INTRINSIC:
return comp->IsIntrinsicImplementedByUserCall(this->AsIntrinsic()->gtIntrinsicName);
#if FEATURE_FIXED_OUT_ARGS && !defined(TARGET_64BIT)
case GT_LSH:
case GT_RSH:
case GT_RSZ:
// Variable shifts of a long end up being helper calls, so mark the tree as such in morph.
// This is potentially too conservative, since they'll get treated as having side effects.
// It is important to mark them as calls so if they are part of an argument list,
// they will get sorted and processed properly (for example, it is important to handle
// all nested calls before putting struct arguments in the argument registers). We
// could mark the trees just before argument processing, but it would require a full
// tree walk of the argument tree, so we just do it when morphing, instead, even though we'll
// mark non-argument trees (that will still get converted to calls, anyway).
return (this->TypeGet() == TYP_LONG) && (gtGetOp2()->OperGet() != GT_CNS_INT);
#endif // FEATURE_FIXED_OUT_ARGS && !TARGET_64BIT
default:
return false;
}
}
//------------------------------------------------------------------------------
// OperIsImplicitIndir : Check whether the operation contains an implicit
// indirection.
// Arguments:
// this - a GenTree node
//
// Return Value:
// True if the given node contains an implicit indirection
//
// Note that for the [HW]INTRINSIC nodes we have to examine the
// details of the node to determine its result.
//
bool GenTree::OperIsImplicitIndir() const
{
switch (gtOper)
{
case GT_LOCKADD:
case GT_XORR:
case GT_XAND:
case GT_XADD:
case GT_XCHG:
case GT_CMPXCHG:
case GT_BLK:
case GT_OBJ:
case GT_STORE_BLK:
case GT_STORE_OBJ:
case GT_STORE_DYN_BLK:
case GT_BOX:
case GT_ARR_INDEX:
case GT_ARR_ELEM:
case GT_ARR_OFFSET:
return true;
case GT_INTRINSIC:
return AsIntrinsic()->gtIntrinsicName == NI_System_Object_GetType;
#ifdef FEATURE_SIMD
case GT_SIMD:
{
return AsSIMD()->OperIsMemoryLoad();
}
#endif // FEATURE_SIMD
#ifdef FEATURE_HW_INTRINSICS
case GT_HWINTRINSIC:
{
return AsHWIntrinsic()->OperIsMemoryLoadOrStore();
}
#endif // FEATURE_HW_INTRINSICS
default:
return false;
}
}
//------------------------------------------------------------------------------
// OperMayThrow : Check whether the operation may throw.
//
//
// Arguments:
// comp - Compiler instance
//
// Return Value:
// True if the given operator may cause an exception
bool GenTree::OperMayThrow(Compiler* comp)
{
GenTree* op;
switch (gtOper)
{
case GT_MOD:
case GT_DIV:
case GT_UMOD:
case GT_UDIV:
/* Division with a non-zero, non-minus-one constant does not throw an exception */
op = AsOp()->gtOp2;
if (varTypeIsFloating(op->TypeGet()))
{
return false; // Floating point division does not throw.
}
// For integers only division by 0 or by -1 can throw
if (op->IsIntegralConst() && !op->IsIntegralConst(0) && !op->IsIntegralConst(-1))
{
return false;
}
return true;
case GT_INTRINSIC:
// If this is an intrinsic that represents the object.GetType(), it can throw a NullReferenceException.
// Currently, this is the only intrinsic that can throw an exception.
return AsIntrinsic()->gtIntrinsicName == NI_System_Object_GetType;
case GT_CALL:
CorInfoHelpFunc helper;
helper = comp->eeGetHelperNum(this->AsCall()->gtCallMethHnd);
return ((helper == CORINFO_HELP_UNDEF) || !comp->s_helperCallProperties.NoThrow(helper));
case GT_IND:
case GT_BLK:
case GT_OBJ:
case GT_NULLCHECK:
case GT_STORE_BLK:
case GT_STORE_DYN_BLK:
return (((this->gtFlags & GTF_IND_NONFAULTING) == 0) && comp->fgAddrCouldBeNull(this->AsIndir()->Addr()));
case GT_ARR_LENGTH:
return (((this->gtFlags & GTF_IND_NONFAULTING) == 0) &&
comp->fgAddrCouldBeNull(this->AsArrLen()->ArrRef()));
case GT_ARR_ELEM:
return comp->fgAddrCouldBeNull(this->AsArrElem()->gtArrObj);
case GT_FIELD:
{
GenTree* fldObj = this->AsField()->GetFldObj();
if (fldObj != nullptr)
{
return comp->fgAddrCouldBeNull(fldObj);
}
return false;
}
case GT_BOUNDS_CHECK:
case GT_ARR_INDEX:
case GT_ARR_OFFSET:
case GT_LCLHEAP:
case GT_CKFINITE:
case GT_INDEX_ADDR:
return true;
#ifdef FEATURE_HW_INTRINSICS
case GT_HWINTRINSIC:
{
GenTreeHWIntrinsic* hwIntrinsicNode = this->AsHWIntrinsic();
assert(hwIntrinsicNode != nullptr);
if (hwIntrinsicNode->OperIsMemoryLoadOrStore())
{
// This operation contains an implicit indirection
// it could throw a null reference exception.
//
return true;
}
break;
}
#endif // FEATURE_HW_INTRINSICS
default:
break;
}
/* Overflow arithmetic operations also throw exceptions */
if (gtOverflowEx())
{
return true;
}
return false;
}
//-----------------------------------------------------------------------------------
// GetFieldCount: Return the register count for a multi-reg lclVar.
//
// Arguments:
// compiler - the current Compiler instance.
//
// Return Value:
// Returns the number of registers defined by this node.
//
// Notes:
// This must be a multireg lclVar.
//
unsigned int GenTreeLclVar::GetFieldCount(Compiler* compiler) const
{
assert(IsMultiReg());
LclVarDsc* varDsc = compiler->lvaGetDesc(GetLclNum());
return varDsc->lvFieldCnt;
}
//-----------------------------------------------------------------------------------
// GetFieldTypeByIndex: Get a specific register's type, based on regIndex, that is produced
// by this multi-reg node.
//
// Arguments:
// compiler - the current Compiler instance.
// idx - which register type to return.
//
// Return Value:
// The register type assigned to this index for this node.
//
// Notes:
// This must be a multireg lclVar and 'idx' must be a valid index for this node.
//
var_types GenTreeLclVar::GetFieldTypeByIndex(Compiler* compiler, unsigned idx)
{
assert(IsMultiReg());
LclVarDsc* varDsc = compiler->lvaGetDesc(GetLclNum());
LclVarDsc* fieldVarDsc = compiler->lvaGetDesc(varDsc->lvFieldLclStart + idx);
assert(fieldVarDsc->TypeGet() != TYP_STRUCT); // Don't expect struct fields.
return fieldVarDsc->TypeGet();
}
#if DEBUGGABLE_GENTREE
// static
GenTree::VtablePtr GenTree::s_vtablesForOpers[] = {nullptr};
GenTree::VtablePtr GenTree::s_vtableForOp = nullptr;
GenTree::VtablePtr GenTree::GetVtableForOper(genTreeOps oper)
{
noway_assert(oper < GT_COUNT);
// First, check a cache.
if (s_vtablesForOpers[oper] != nullptr)
{
return s_vtablesForOpers[oper];
}
// Otherwise, look up the correct vtable entry. Note that we want the most derived GenTree subtype
// for an oper. E.g., GT_LCL_VAR is defined in GTSTRUCT_3 as GenTreeLclVar and in GTSTRUCT_N as
// GenTreeLclVarCommon. We want the GenTreeLclVar vtable, since nothing should actually be
// instantiated as a GenTreeLclVarCommon.
VtablePtr res = nullptr;
switch (oper)
{
// clang-format off
#define GTSTRUCT_0(nm, tag) /*handle explicitly*/
#define GTSTRUCT_1(nm, tag) \
case tag: \
{ \
GenTree##nm gt; \
res = *reinterpret_cast<VtablePtr*>(>); \
} \
break;
#define GTSTRUCT_2(nm, tag, tag2) \
case tag: \
case tag2: \
{ \
GenTree##nm gt; \
res = *reinterpret_cast<VtablePtr*>(>); \
} \
break;
#define GTSTRUCT_3(nm, tag, tag2, tag3) \
case tag: \
case tag2: \
case tag3: \
{ \
GenTree##nm gt; \
res = *reinterpret_cast<VtablePtr*>(>); \
} \
break;
#define GTSTRUCT_4(nm, tag, tag2, tag3, tag4) \
case tag: \
case tag2: \
case tag3: \
case tag4: \
{ \
GenTree##nm gt; \
res = *reinterpret_cast<VtablePtr*>(>); \
} \
break;
#define GTSTRUCT_N(nm, ...) /*handle explicitly*/
#define GTSTRUCT_2_SPECIAL(nm, tag, tag2) /*handle explicitly*/
#define GTSTRUCT_3_SPECIAL(nm, tag, tag2, tag3) /*handle explicitly*/
#include "gtstructs.h"
// clang-format on
// Handle the special cases.
// The following opers are in GTSTRUCT_N but no other place (namely, no subtypes).
case GT_STORE_BLK:
case GT_BLK:
{
GenTreeBlk gt;
res = *reinterpret_cast<VtablePtr*>(>);
}
break;
case GT_IND:
case GT_NULLCHECK:
{
GenTreeIndir gt;
res = *reinterpret_cast<VtablePtr*>(>);
}
break;
// We don't need to handle GTSTRUCT_N for LclVarCommon, since all those allowed opers are specified
// in their proper subtype. Similarly for GenTreeIndir.
default:
{
// Should be unary or binary op.
if (s_vtableForOp == nullptr)
{
unsigned opKind = OperKind(oper);
assert(!IsExOp(opKind));
assert(OperIsSimple(oper) || OperIsLeaf(oper));
// Need to provide non-null operands.
GenTreeIntCon dummyOp(TYP_INT, 0);
GenTreeOp gt(oper, TYP_INT, &dummyOp, ((opKind & GTK_UNOP) ? nullptr : &dummyOp));
s_vtableForOp = *reinterpret_cast<VtablePtr*>(>);
}
res = s_vtableForOp;
break;
}
}
s_vtablesForOpers[oper] = res;
return res;
}
void GenTree::SetVtableForOper(genTreeOps oper)
{
*reinterpret_cast<VtablePtr*>(this) = GetVtableForOper(oper);
}
#endif // DEBUGGABLE_GENTREE
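//------------------------------------------------------------------------
// gtNewOperNode: Allocate a simple binary GenTreeOp node.
//
// Arguments:
//    oper - the operator
//    type - the type of the node
//    op1  - the first operand
//    op2  - the second operand
//
// Notes:
//    Opers whose nodes extend GenTreeOp must be created via their own constructors.
//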
GenTree* Compiler::gtNewOperNode(genTreeOps oper, var_types type, GenTree* op1, GenTree* op2)
{
assert(op1 != nullptr);
assert(op2 != nullptr);
// We should not be allocating nodes that extend GenTreeOp with this;
// should call the appropriate constructor for the extended type.
assert(!GenTree::IsExOp(GenTree::OperKind(oper)));
GenTree* node = new (this, oper) GenTreeOp(oper, type, op1, op2);
return node;
}
GenTreeColon* Compiler::gtNewColonNode(var_types type, GenTree* elseNode, GenTree* thenNode)
{
return new (this, GT_COLON) GenTreeColon(TYP_INT, elseNode, thenNode);
}
GenTreeQmark* Compiler::gtNewQmarkNode(var_types type, GenTree* cond, GenTreeColon* colon)
{
compQmarkUsed = true;
GenTreeQmark* result = new (this, GT_QMARK) GenTreeQmark(type, cond, colon);
#ifdef DEBUG
if (compQmarkRationalized)
{
fgCheckQmarkAllowedForm(result);
}
#endif
return result;
}
GenTreeIntCon* Compiler::gtNewIconNode(ssize_t value, var_types type)
{
return new (this, GT_CNS_INT) GenTreeIntCon(type, value);
}
GenTreeIntCon* Compiler::gtNewNull()
{
return gtNewIconNode(0, TYP_REF);
}
GenTreeIntCon* Compiler::gtNewTrue()
{
return gtNewIconNode(1, TYP_INT);
}
GenTreeIntCon* Compiler::gtNewFalse()
{
return gtNewIconNode(0, TYP_INT);
}
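//------------------------------------------------------------------------
// gtNewIconNode: Create a TYP_I_IMPL integer constant for a field offset,
//    attaching the given field sequence (or NotAField if none is provided).
//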
GenTreeIntCon* Compiler::gtNewIconNode(unsigned fieldOffset, FieldSeqNode* fieldSeq)
{
GenTreeIntCon* node = new (this, GT_CNS_INT) GenTreeIntCon(TYP_I_IMPL, static_cast<ssize_t>(fieldOffset));
node->gtFieldSeq = fieldSeq == nullptr ? FieldSeqStore::NotAField() : fieldSeq;
return node;
}
// return a new node representing the value in a physical register
GenTree* Compiler::gtNewPhysRegNode(regNumber reg, var_types type)
{
assert(genIsValidIntReg(reg) || (reg == REG_SPBASE));
GenTree* result = new (this, GT_PHYSREG) GenTreePhysReg(reg, type);
return result;
}
GenTree* Compiler::gtNewJmpTableNode()
{
return new (this, GT_JMPTABLE) GenTree(GT_JMPTABLE, TYP_I_IMPL);
}
/*****************************************************************************
*
* Converts an annotated token into icon flags (so that we will later be
* able to tell the type of the handle that will be embedded in the icon
* node)
*/
GenTreeFlags Compiler::gtTokenToIconFlags(unsigned token)
{
GenTreeFlags flags = GTF_EMPTY;
switch (TypeFromToken(token))
{
case mdtTypeRef:
case mdtTypeDef:
case mdtTypeSpec:
flags = GTF_ICON_CLASS_HDL;
break;
case mdtMethodDef:
flags = GTF_ICON_METHOD_HDL;
break;
case mdtFieldDef:
flags = GTF_ICON_FIELD_HDL;
break;
default:
flags = GTF_ICON_TOKEN_HDL;
break;
}
return flags;
}
//-----------------------------------------------------------------------------------------
// gtNewIndOfIconHandleNode: Creates an indirection GenTree node of a constant handle
//
// Arguments:
// indType - The type returned by the indirection node
// addr - The constant address to read from
// iconFlags - The GTF_ICON flag value that specifies the kind of handle that we have
// isInvariant - The indNode should also be marked as invariant
//
// Return Value:
// Returns a GT_IND node representing the value at the address provided by 'addr'
//
// Notes:
// The GT_IND node is marked as non-faulting
// If the indirection is not invariant, we also mark the indNode as GTF_GLOB_REF
//
GenTree* Compiler::gtNewIndOfIconHandleNode(var_types indType, size_t addr, GenTreeFlags iconFlags, bool isInvariant)
{
GenTree* addrNode = gtNewIconHandleNode(addr, iconFlags);
GenTree* indNode = gtNewOperNode(GT_IND, indType, addrNode);
// This indirection won't cause an exception.
//
indNode->gtFlags |= GTF_IND_NONFAULTING;
if (isInvariant)
{
assert(iconFlags != GTF_ICON_STATIC_HDL); // Pointer to a mutable class Static variable
assert(iconFlags != GTF_ICON_BBC_PTR); // Pointer to a mutable basic block count value
assert(iconFlags != GTF_ICON_GLOBAL_PTR); // Pointer to mutable data from the VM state
// This indirection also is invariant.
indNode->gtFlags |= GTF_IND_INVARIANT;
if (iconFlags == GTF_ICON_STR_HDL)
{
// String literals are never null
indNode->gtFlags |= GTF_IND_NONNULL;
}
}
else
{
// GLOB_REF needs to be set for indirections returning values from mutable
// locations, so that, e.g., args sorting does not reorder them with calls.
indNode->gtFlags |= GTF_GLOB_REF;
}
return indNode;
}
/*****************************************************************************
*
* Allocates an integer constant entry that represents a HANDLE to something.
* It may not be allowed to embed HANDLEs directly into the JITed code (e.g.,
* as arguments to JIT helpers). Get a corresponding value that can be embedded.
* If the handle needs to be accessed via an indirection, pValue points to it.
*/
GenTree* Compiler::gtNewIconEmbHndNode(void* value, void* pValue, GenTreeFlags iconFlags, void* compileTimeHandle)
{
GenTree* iconNode;
GenTree* handleNode;
if (value != nullptr)
{
// When 'value' is non-null, pValue is required to be null
assert(pValue == nullptr);
// use 'value' to construct an integer constant node
iconNode = gtNewIconHandleNode((size_t)value, iconFlags);
// 'value' is the handle
handleNode = iconNode;
}
else
{
// When 'value' is null, pValue is required to be non-null
assert(pValue != nullptr);
// use 'pValue' to construct an integer constant node
iconNode = gtNewIconHandleNode((size_t)pValue, iconFlags);
// 'pValue' is an address of a location that contains the handle
// construct the indirection of 'pValue'
handleNode = gtNewOperNode(GT_IND, TYP_I_IMPL, iconNode);
// This indirection won't cause an exception.
handleNode->gtFlags |= GTF_IND_NONFAULTING;
// This indirection also is invariant.
handleNode->gtFlags |= GTF_IND_INVARIANT;
}
iconNode->AsIntCon()->gtCompileTimeHandle = (size_t)compileTimeHandle;
return handleNode;
}
/*****************************************************************************/
GenTree* Compiler::gtNewStringLiteralNode(InfoAccessType iat, void* pValue)
{
GenTree* tree = nullptr;
switch (iat)
{
case IAT_VALUE:
setMethodHasFrozenString();
tree = gtNewIconEmbHndNode(pValue, nullptr, GTF_ICON_STR_HDL, nullptr);
tree->gtType = TYP_REF;
#ifdef DEBUG
tree->AsIntCon()->gtTargetHandle = (size_t)pValue;
#endif
break;
case IAT_PVALUE: // The value needs to be accessed via an indirection
// Create an indirection
tree = gtNewIndOfIconHandleNode(TYP_REF, (size_t)pValue, GTF_ICON_STR_HDL, true);
#ifdef DEBUG
tree->gtGetOp1()->AsIntCon()->gtTargetHandle = (size_t)pValue;
#endif
break;
case IAT_PPVALUE: // The value needs to be accessed via a double indirection
// Create the first indirection
tree = gtNewIndOfIconHandleNode(TYP_I_IMPL, (size_t)pValue, GTF_ICON_CONST_PTR, true);
#ifdef DEBUG
tree->gtGetOp1()->AsIntCon()->gtTargetHandle = (size_t)pValue;
#endif
// Create the second indirection
tree = gtNewOperNode(GT_IND, TYP_REF, tree);
// This indirection won't cause an exception.
tree->gtFlags |= GTF_IND_NONFAULTING;
// This indirection points into the global heap (it is a String object)
tree->gtFlags |= GTF_GLOB_REF;
break;
default:
noway_assert(!"Unexpected InfoAccessType");
}
return tree;
}
//------------------------------------------------------------------------
// gtNewStringLiteralLength: create GenTreeIntCon node for the given string
// literal to store its length.
//
// Arguments:
// node - string literal node.
//
// Return Value:
// GenTreeIntCon node with string's length as a value or null.
//
GenTreeIntCon* Compiler::gtNewStringLiteralLength(GenTreeStrCon* node)
{
if (node->IsStringEmptyField())
{
JITDUMP("Folded String.Empty.Length to 0\n");
return gtNewIconNode(0);
}
int length = -1;
const char16_t* str = info.compCompHnd->getStringLiteral(node->gtScpHnd, node->gtSconCPX, &length);
if (length >= 0)
{
GenTreeIntCon* iconNode = gtNewIconNode(length);
// str can be NULL for dynamic context
if (str != nullptr)
{
JITDUMP("Folded '\"%ws\".Length' to '%d'\n", str, length)
}
else
{
JITDUMP("Folded 'CNS_STR.Length' to '%d'\n", length)
}
return iconNode;
}
return nullptr;
}
/*****************************************************************************/
GenTree* Compiler::gtNewLconNode(__int64 value)
{
#ifdef TARGET_64BIT
GenTree* node = new (this, GT_CNS_INT) GenTreeIntCon(TYP_LONG, value);
#else
GenTree* node = new (this, GT_CNS_LNG) GenTreeLngCon(value);
#endif
return node;
}
GenTree* Compiler::gtNewDconNode(double value, var_types type)
{
GenTree* node = new (this, GT_CNS_DBL) GenTreeDblCon(value, type);
return node;
}
GenTree* Compiler::gtNewSconNode(int CPX, CORINFO_MODULE_HANDLE scpHandle)
{
// 'GT_CNS_STR' nodes later get transformed into 'GT_CALL'
assert(GenTree::s_gtNodeSizes[GT_CALL] > GenTree::s_gtNodeSizes[GT_CNS_STR]);
GenTree* node = new (this, GT_CALL) GenTreeStrCon(CPX, scpHandle DEBUGARG(/*largeNode*/ true));
return node;
}
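//------------------------------------------------------------------------
// gtNewZeroConNode: Create a constant node with the value zero for the given type.
//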
GenTree* Compiler::gtNewZeroConNode(var_types type)
{
GenTree* zero;
switch (type)
{
case TYP_INT:
zero = gtNewIconNode(0);
break;
case TYP_BYREF:
FALLTHROUGH;
case TYP_REF:
zero = gtNewIconNode(0);
zero->gtType = type;
break;
case TYP_LONG:
zero = gtNewLconNode(0);
break;
case TYP_FLOAT:
zero = gtNewDconNode(0.0);
zero->gtType = type;
break;
case TYP_DOUBLE:
zero = gtNewDconNode(0.0);
break;
default:
noway_assert(!"Bad type in gtNewZeroConNode");
zero = nullptr;
break;
}
return zero;
}
GenTree* Compiler::gtNewOneConNode(var_types type)
{
GenTree* one;
switch (type)
{
case TYP_INT:
case TYP_UINT:
one = gtNewIconNode(1);
break;
case TYP_LONG:
case TYP_ULONG:
one = gtNewLconNode(1);
break;
case TYP_FLOAT:
case TYP_DOUBLE:
one = gtNewDconNode(1.0);
one->gtType = type;
break;
default:
noway_assert(!"Bad type in gtNewOneConNode");
one = nullptr;
break;
}
return one;
}
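//------------------------------------------------------------------------
// gtNewStoreLclVar: Create a GT_STORE_LCL_VAR node storing 'src' into local 'dstLclNum'.
//
// Notes:
//    The source's common flags are propagated and the store is marked as a
//    definition (GTF_VAR_DEF | GTF_ASG).
//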
GenTreeLclVar* Compiler::gtNewStoreLclVar(unsigned dstLclNum, GenTree* src)
{
GenTreeLclVar* store = new (this, GT_STORE_LCL_VAR) GenTreeLclVar(GT_STORE_LCL_VAR, src->TypeGet(), dstLclNum);
store->gtOp1 = src;
store->gtFlags = (src->gtFlags & GTF_COMMON_MASK);
store->gtFlags |= GTF_VAR_DEF | GTF_ASG;
return store;
}
#ifdef FEATURE_SIMD
//---------------------------------------------------------------------
// gtNewSIMDVectorZero: create a GT_SIMD node for Vector<T>.Zero
//
// Arguments:
// simdType - simd vector type
// simdBaseJitType - element type of vector
// simdSize - size of vector in bytes
GenTree* Compiler::gtNewSIMDVectorZero(var_types simdType, CorInfoType simdBaseJitType, unsigned simdSize)
{
var_types simdBaseType = genActualType(JitType2PreciseVarType(simdBaseJitType));
GenTree* initVal = gtNewZeroConNode(simdBaseType);
initVal->gtType = simdBaseType;
return gtNewSIMDNode(simdType, initVal, SIMDIntrinsicInit, simdBaseJitType, simdSize);
}
#endif // FEATURE_SIMD
GenTreeCall* Compiler::gtNewIndCallNode(GenTree* addr, var_types type, GenTreeCall::Use* args, const DebugInfo& di)
{
return gtNewCallNode(CT_INDIRECT, (CORINFO_METHOD_HANDLE)addr, type, args, di);
}
GenTreeCall* Compiler::gtNewCallNode(
gtCallTypes callType, CORINFO_METHOD_HANDLE callHnd, var_types type, GenTreeCall::Use* args, const DebugInfo& di)
{
GenTreeCall* node = new (this, GT_CALL) GenTreeCall(genActualType(type));
node->gtFlags |= (GTF_CALL | GTF_GLOB_REF);
#ifdef UNIX_X86_ABI
if (callType == CT_INDIRECT || callType == CT_HELPER)
node->gtFlags |= GTF_CALL_POP_ARGS;
#endif // UNIX_X86_ABI
for (GenTreeCall::Use& use : GenTreeCall::UseList(args))
{
node->gtFlags |= (use.GetNode()->gtFlags & GTF_ALL_EFFECT);
}
node->gtCallType = callType;
node->gtCallMethHnd = callHnd;
node->gtCallArgs = args;
node->gtCallThisArg = nullptr;
node->fgArgInfo = nullptr;
INDEBUG(node->callSig = nullptr;)
node->tailCallInfo = nullptr;
node->gtRetClsHnd = nullptr;
node->gtControlExpr = nullptr;
node->gtCallMoreFlags = GTF_CALL_M_EMPTY;
if (callType == CT_INDIRECT)
{
node->gtCallCookie = nullptr;
}
else
{
node->gtInlineCandidateInfo = nullptr;
}
node->gtCallLateArgs = nullptr;
node->gtReturnType = type;
#ifdef FEATURE_READYTORUN
node->gtEntryPoint.addr = nullptr;
node->gtEntryPoint.accessType = IAT_VALUE;
#endif
#if defined(DEBUG) || defined(INLINE_DATA)
// These get updated after call node is built.
node->gtInlineObservation = InlineObservation::CALLEE_UNUSED_INITIAL;
node->gtRawILOffset = BAD_IL_OFFSET;
node->gtInlineContext = compInlineContext;
#endif
// Spec: Managed Retval sequence points need to be generated while generating debug info for debuggable code.
//
// Implementation note: if not generating MRV info genCallSite2ILOffsetMap will be NULL and
// codegen will pass DebugInfo() to emitter, which will cause emitter
// not to emit IP mapping entry.
if (opts.compDbgCode && opts.compDbgInfo && di.IsValid())
{
// Managed Retval - IL offset of the call. This offset is used to emit a
// CALL_INSTRUCTION type sequence point while emitting corresponding native call.
//
// TODO-Cleanup:
// a) (Opt) We need not store this offset if the method doesn't return a
// value. Rather it can be made BAD_IL_OFFSET to prevent a sequence
// point being emitted.
//
// b) (Opt) Add new sequence points only if requested by debugger through
// a new boundary type - ICorDebugInfo::BoundaryTypes
if (genCallSite2DebugInfoMap == nullptr)
{
genCallSite2DebugInfoMap = new (getAllocator()) CallSiteDebugInfoTable(getAllocator());
}
// Make sure that there are no duplicate entries for a given call node
assert(!genCallSite2DebugInfoMap->Lookup(node));
genCallSite2DebugInfoMap->Set(node, di);
}
// Initialize gtOtherRegs
node->ClearOtherRegs();
// Initialize spill flags of gtOtherRegs
node->ClearOtherRegFlags();
#if !defined(TARGET_64BIT)
if (varTypeIsLong(node))
{
assert(node->gtReturnType == node->gtType);
// Initialize Return type descriptor of call node
node->InitializeLongReturnType();
}
#endif // !defined(TARGET_64BIT)
return node;
}
GenTreeLclVar* Compiler::gtNewLclvNode(unsigned lnum, var_types type DEBUGARG(IL_OFFSET offs))
{
assert(type != TYP_VOID);
// We need to ensure that all struct values are normalized.
// It might be nice to assert this in general, but we have assignments of int to long.
if (varTypeIsStruct(type))
{
// Make an exception for implicit by-ref parameters during global morph, since
// their lvType has been updated to byref but their appearances have not yet all
// been rewritten and so may have struct type still.
LclVarDsc* varDsc = lvaGetDesc(lnum);
bool simd12ToSimd16Widening = false;
#if FEATURE_SIMD
// We can additionally have a SIMD12 that was widened to a SIMD16, generally as part of lowering
simd12ToSimd16Widening = (type == TYP_SIMD16) && (varDsc->lvType == TYP_SIMD12);
#endif
assert((type == varDsc->lvType) || simd12ToSimd16Widening ||
(lvaIsImplicitByRefLocal(lnum) && fgGlobalMorph && (varDsc->lvType == TYP_BYREF)));
}
GenTreeLclVar* node = new (this, GT_LCL_VAR) GenTreeLclVar(GT_LCL_VAR, type, lnum DEBUGARG(offs));
/* Cannot have this assert because the inliner uses this function
* to add temporaries */
// assert(lnum < lvaCount);
return node;
}
GenTreeLclVar* Compiler::gtNewLclLNode(unsigned lnum, var_types type DEBUGARG(IL_OFFSET offs))
{
// We need to ensure that all struct values are normalized.
// It might be nice to assert this in general, but we have assignments of int to long.
if (varTypeIsStruct(type))
{
// Make an exception for implicit by-ref parameters during global morph, since
// their lvType has been updated to byref but their appearances have not yet all
// been rewritten and so may have struct type still.
assert(type == lvaTable[lnum].lvType ||
(lvaIsImplicitByRefLocal(lnum) && fgGlobalMorph && (lvaTable[lnum].lvType == TYP_BYREF)));
}
// This local variable node may later get transformed into a large node
assert(GenTree::s_gtNodeSizes[LargeOpOpcode()] > GenTree::s_gtNodeSizes[GT_LCL_VAR]);
GenTreeLclVar* node =
new (this, LargeOpOpcode()) GenTreeLclVar(GT_LCL_VAR, type, lnum DEBUGARG(offs) DEBUGARG(/*largeNode*/ true));
return node;
}
GenTreeLclVar* Compiler::gtNewLclVarAddrNode(unsigned lclNum, var_types type)
{
GenTreeLclVar* node = new (this, GT_LCL_VAR_ADDR) GenTreeLclVar(GT_LCL_VAR_ADDR, type, lclNum);
return node;
}
GenTreeLclFld* Compiler::gtNewLclFldAddrNode(unsigned lclNum, unsigned lclOffs, FieldSeqNode* fieldSeq, var_types type)
{
GenTreeLclFld* node = new (this, GT_LCL_FLD_ADDR) GenTreeLclFld(GT_LCL_FLD_ADDR, type, lclNum, lclOffs);
node->SetFieldSeq(fieldSeq == nullptr ? FieldSeqStore::NotAField() : fieldSeq);
return node;
}
GenTreeLclFld* Compiler::gtNewLclFldNode(unsigned lnum, var_types type, unsigned offset)
{
GenTreeLclFld* node = new (this, GT_LCL_FLD) GenTreeLclFld(GT_LCL_FLD, type, lnum, offset);
/* Cannot have this assert because the inliner uses this function
* to add temporaries */
// assert(lnum < lvaCount);
node->SetFieldSeq(FieldSeqStore::NotAField());
return node;
}
GenTree* Compiler::gtNewInlineCandidateReturnExpr(GenTree* inlineCandidate, var_types type, BasicBlockFlags bbFlags)
{
assert(GenTree::s_gtNodeSizes[GT_RET_EXPR] == TREE_NODE_SZ_LARGE);
GenTreeRetExpr* node = new (this, GT_RET_EXPR) GenTreeRetExpr(type);
node->gtInlineCandidate = inlineCandidate;
node->bbFlags = bbFlags;
if (varTypeIsStruct(inlineCandidate) && !inlineCandidate->OperIsBlkOp())
{
node->gtRetClsHnd = gtGetStructHandle(inlineCandidate);
}
// GT_RET_EXPR node eventually might be bashed back to GT_CALL (when inlining is aborted for example).
// Therefore it should carry the GTF_CALL flag so that all the rules about spilling can apply to it as well.
// For example, impImportLeave or CEE_POP need to spill GT_RET_EXPR before emptying the evaluation stack.
node->gtFlags |= GTF_CALL;
return node;
}
GenTreeCall::Use* Compiler::gtPrependNewCallArg(GenTree* node, GenTreeCall::Use* args)
{
return new (this, CMK_ASTNode) GenTreeCall::Use(node, args);
}
GenTreeCall::Use* Compiler::gtInsertNewCallArgAfter(GenTree* node, GenTreeCall::Use* after)
{
after->SetNext(new (this, CMK_ASTNode) GenTreeCall::Use(node, after->GetNext()));
return after->GetNext();
}
GenTreeCall::Use* Compiler::gtNewCallArgs(GenTree* node)
{
return new (this, CMK_ASTNode) GenTreeCall::Use(node);
}
GenTreeCall::Use* Compiler::gtNewCallArgs(GenTree* node1, GenTree* node2)
{
return new (this, CMK_ASTNode) GenTreeCall::Use(node1, gtNewCallArgs(node2));
}
GenTreeCall::Use* Compiler::gtNewCallArgs(GenTree* node1, GenTree* node2, GenTree* node3)
{
return new (this, CMK_ASTNode) GenTreeCall::Use(node1, gtNewCallArgs(node2, node3));
}
GenTreeCall::Use* Compiler::gtNewCallArgs(GenTree* node1, GenTree* node2, GenTree* node3, GenTree* node4)
{
return new (this, CMK_ASTNode) GenTreeCall::Use(node1, gtNewCallArgs(node2, node3, node4));
}
/*****************************************************************************
*
* Given a GT_CALL node, access the fgArgInfo and find the entry
* that has the matching argNum and return the fgArgTableEntryPtr
*/
fgArgTabEntry* Compiler::gtArgEntryByArgNum(GenTreeCall* call, unsigned argNum)
{
fgArgInfo* argInfo = call->fgArgInfo;
noway_assert(argInfo != nullptr);
return argInfo->GetArgEntry(argNum);
}
/*****************************************************************************
*
* Given a GT_CALL node, access the fgArgInfo and find the entry
* that has the matching node and return the fgArgTableEntryPtr
*/
fgArgTabEntry* Compiler::gtArgEntryByNode(GenTreeCall* call, GenTree* node)
{
fgArgInfo* argInfo = call->fgArgInfo;
noway_assert(argInfo != nullptr);
unsigned argCount = argInfo->ArgCount();
fgArgTabEntry** argTable = argInfo->ArgTable();
fgArgTabEntry* curArgTabEntry = nullptr;
for (unsigned i = 0; i < argCount; i++)
{
curArgTabEntry = argTable[i];
if (curArgTabEntry->GetNode() == node)
{
return curArgTabEntry;
}
else if (curArgTabEntry->use->GetNode() == node)
{
return curArgTabEntry;
}
}
noway_assert(!"gtArgEntryByNode: node not found");
return nullptr;
}
/*****************************************************************************
*
* Find and return the entry with the given "lateArgInx". Requires that one is found
* (asserts this).
*/
fgArgTabEntry* Compiler::gtArgEntryByLateArgIndex(GenTreeCall* call, unsigned lateArgInx)
{
fgArgInfo* argInfo = call->fgArgInfo;
noway_assert(argInfo != nullptr);
assert(lateArgInx != UINT_MAX);
unsigned argCount = argInfo->ArgCount();
fgArgTabEntry** argTable = argInfo->ArgTable();
fgArgTabEntry* curArgTabEntry = nullptr;
for (unsigned i = 0; i < argCount; i++)
{
curArgTabEntry = argTable[i];
if (curArgTabEntry->isLateArg() && curArgTabEntry->GetLateArgInx() == lateArgInx)
{
return curArgTabEntry;
}
}
noway_assert(!"gtArgEntryByNode: node not found");
return nullptr;
}
//------------------------------------------------------------------------
// gtArgNodeByLateArgInx: Given a call instruction, find the argument with the given
// late arg index (i.e. the given position in the gtCallLateArgs list).
// Arguments:
// call - the call node
// lateArgInx - the index into the late args list
//
// Return value:
// The late argument node.
//
GenTree* Compiler::gtArgNodeByLateArgInx(GenTreeCall* call, unsigned lateArgInx)
{
GenTree* argx = nullptr;
unsigned regIndex = 0;
for (GenTreeCall::Use& use : call->LateArgs())
{
argx = use.GetNode();
assert(!argx->IsArgPlaceHolderNode()); // No placeholder nodes are in gtCallLateArgs;
if (regIndex == lateArgInx)
{
break;
}
regIndex++;
}
noway_assert(argx != nullptr);
return argx;
}
/*****************************************************************************
*
* Create a node that will assign 'src' to 'dst'.
*/
GenTreeOp* Compiler::gtNewAssignNode(GenTree* dst, GenTree* src)
{
assert(!src->TypeIs(TYP_VOID));
/* Mark the target as being assigned */
if ((dst->gtOper == GT_LCL_VAR) || (dst->OperGet() == GT_LCL_FLD))
{
dst->gtFlags |= GTF_VAR_DEF;
if (dst->IsPartialLclFld(this))
{
// We treat these partial writes as combined uses and defs.
dst->gtFlags |= GTF_VAR_USEASG;
}
}
dst->gtFlags |= GTF_DONT_CSE;
#if defined(FEATURE_SIMD) && !defined(TARGET_X86)
// TODO-CQ: x86 Windows supports multi-reg returns but not SIMD multi-reg returns
if (varTypeIsSIMD(dst->gtType))
{
// We want to track SIMD assignments as being intrinsics since they
// are functionally SIMD `mov` instructions and are more efficient
// when we don't promote, particularly when it occurs due to inlining
SetOpLclRelatedToSIMDIntrinsic(dst);
SetOpLclRelatedToSIMDIntrinsic(src);
}
#endif // FEATURE_SIMD
/* Create the assignment node */
GenTreeOp* asg = gtNewOperNode(GT_ASG, dst->TypeGet(), dst, src)->AsOp();
/* Mark the expression as containing an assignment */
asg->gtFlags |= GTF_ASG;
return asg;
}
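// Illustrative sketch (hypothetical caller): assigning a constant to an int local.
// The destination is marked GTF_VAR_DEF / GTF_DONT_CSE and the resulting GT_ASG
// node carries GTF_ASG:
//
//   GenTree* dst = gtNewLclvNode(lclNum, TYP_INT);
//   GenTree* asg = gtNewAssignNode(dst, gtNewIconNode(42, TYP_INT));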
//------------------------------------------------------------------------
// gtNewObjNode: Creates a new Obj node.
//
// Arguments:
// structHnd - The class handle of the struct type.
// addr - The address of the struct.
//
// Return Value:
// Returns a node representing the struct value at the given address.
//
GenTreeObj* Compiler::gtNewObjNode(CORINFO_CLASS_HANDLE structHnd, GenTree* addr)
{
var_types nodeType = impNormStructType(structHnd);
assert(varTypeIsStruct(nodeType));
GenTreeObj* objNode = new (this, GT_OBJ) GenTreeObj(nodeType, addr, typGetObjLayout(structHnd));
// An Obj is not a global reference if it is known to be a local struct.
if ((addr->gtFlags & GTF_GLOB_REF) == 0)
{
GenTreeLclVarCommon* lclNode = addr->IsLocalAddrExpr();
if (lclNode != nullptr)
{
objNode->gtFlags |= GTF_IND_NONFAULTING;
if (!lvaIsImplicitByRefLocal(lclNode->GetLclNum()))
{
objNode->gtFlags &= ~GTF_GLOB_REF;
}
}
}
return objNode;
}
//------------------------------------------------------------------------
// gtSetObjGcInfo: Set the GC info on an object node
//
// Arguments:
// objNode - The object node of interest
void Compiler::gtSetObjGcInfo(GenTreeObj* objNode)
{
assert(varTypeIsStruct(objNode->TypeGet()));
assert(objNode->TypeGet() == impNormStructType(objNode->GetLayout()->GetClassHandle()));
if (!objNode->GetLayout()->HasGCPtr())
{
objNode->SetOper(objNode->OperIs(GT_OBJ) ? GT_BLK : GT_STORE_BLK);
}
}
//------------------------------------------------------------------------
// gtNewStructVal: Return a node that represents a struct value
//
// Arguments:
// structHnd - The class for the struct
// addr - The address of the struct
//
// Return Value:
// A block, object or local node that represents the struct value pointed to by 'addr'.
GenTree* Compiler::gtNewStructVal(CORINFO_CLASS_HANDLE structHnd, GenTree* addr)
{
if (addr->gtOper == GT_ADDR)
{
GenTree* val = addr->gtGetOp1();
if (val->OperGet() == GT_LCL_VAR)
{
unsigned lclNum = addr->gtGetOp1()->AsLclVarCommon()->GetLclNum();
LclVarDsc* varDsc = &(lvaTable[lclNum]);
if (varTypeIsStruct(varDsc) && (varDsc->GetStructHnd() == structHnd) && !lvaIsImplicitByRefLocal(lclNum))
{
return addr->gtGetOp1();
}
}
}
return gtNewObjNode(structHnd, addr);
}
//------------------------------------------------------------------------
// gtNewBlockVal: Return a node that represents a possibly untyped block value
//
// Arguments:
// addr - The address of the block
// size - The size of the block
//
// Return Value:
// A block, object or local node that represents the block value pointed to by 'addr'.
GenTree* Compiler::gtNewBlockVal(GenTree* addr, unsigned size)
{
// By default we treat this as an opaque struct type with known size.
var_types blkType = TYP_STRUCT;
if (addr->gtOper == GT_ADDR)
{
GenTree* val = addr->gtGetOp1();
#if FEATURE_SIMD
if (varTypeIsSIMD(val) && (genTypeSize(val) == size))
{
blkType = val->TypeGet();
}
#endif // FEATURE_SIMD
if (varTypeIsStruct(val) && val->OperIs(GT_LCL_VAR))
{
LclVarDsc* varDsc = lvaGetDesc(val->AsLclVarCommon());
unsigned varSize = varTypeIsStruct(varDsc) ? varDsc->lvExactSize : genTypeSize(varDsc);
if (varSize == size)
{
return val;
}
}
}
return new (this, GT_BLK) GenTreeBlk(GT_BLK, blkType, addr, typGetBlkLayout(size));
}
// Creates a new assignment node for a CpObj.
// Parameters (exactly the same as MSIL CpObj):
//
// dstAddr - The target to copy the struct to
// srcAddr - The source to copy the struct from
// structHnd - A class token that represents the type of object being copied. May be null
// if FEATURE_SIMD is enabled and the source has a SIMD type.
// isVolatile - Is this marked as volatile memory?
GenTree* Compiler::gtNewCpObjNode(GenTree* dstAddr, GenTree* srcAddr, CORINFO_CLASS_HANDLE structHnd, bool isVolatile)
{
GenTree* lhs = gtNewStructVal(structHnd, dstAddr);
GenTree* src = nullptr;
if (lhs->OperIs(GT_OBJ))
{
GenTreeObj* lhsObj = lhs->AsObj();
#if DEBUG
// Codegen for CpObj assumes that we cannot have a struct with GC pointers whose size is not a multiple
// of the register size. The EE currently does not allow this to ensure that GC pointers are aligned
// if the struct is stored in an array. Note that this restriction doesn't apply to stack-allocated objects:
// they are never stored in arrays. We should never get to this method with stack-allocated objects since they
// are never copied so we don't need to exclude them from the assert below.
// Let's assert it just to be safe.
ClassLayout* layout = lhsObj->GetLayout();
unsigned size = layout->GetSize();
assert((layout->GetGCPtrCount() == 0) || (roundUp(size, REGSIZE_BYTES) == size));
#endif
gtSetObjGcInfo(lhsObj);
}
if (srcAddr->OperGet() == GT_ADDR)
{
src = srcAddr->AsOp()->gtOp1;
}
else
{
src = gtNewOperNode(GT_IND, lhs->TypeGet(), srcAddr);
}
GenTree* result = gtNewBlkOpNode(lhs, src, isVolatile, true);
return result;
}
//------------------------------------------------------------------------
// FixupInitBlkValue: Fixup the init value for an initBlk operation
//
// Arguments:
// asgType - The type of assignment that the initBlk is being transformed into
//
// Return Value:
// Modifies the constant value on this node to be the appropriate "fill"
// value for the initblk.
//
// Notes:
// The initBlk MSIL instruction takes a byte value, which must be
// extended to the size of the assignment when an initBlk is transformed
// to an assignment of a primitive type.
// This performs the appropriate extension.
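// For example (illustrative): an initBlk of a 4-byte TYP_INT location with the
// byte fill value 0xAB rewrites the constant to 0xABABABAB:
//   0xAB -> 0xABAB (cns |= cns << 8) -> 0xABABABAB (cns |= cns << 16)
// and an 8-byte target on a 64-bit platform additionally applies (cns |= cns << 32).
//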
void GenTreeIntCon::FixupInitBlkValue(var_types asgType)
{
assert(varTypeIsIntegralOrI(asgType));
unsigned size = genTypeSize(asgType);
if (size > 1)
{
size_t cns = gtIconVal;
cns = cns & 0xFF;
cns |= cns << 8;
if (size >= 4)
{
cns |= cns << 16;
#ifdef TARGET_64BIT
if (size == 8)
{
cns |= cns << 32;
}
#endif // TARGET_64BIT
// Make the type match for evaluation types.
gtType = asgType;
// if we are initializing a GC type the value being assigned must be zero (null).
assert(!varTypeIsGC(asgType) || (cns == 0));
}
gtIconVal = cns;
}
}
//----------------------------------------------------------------------------
// UsesDivideByConstOptimized:
// returns true if rationalize will use the division by constant
// optimization for this node.
//
// Arguments:
// this - a GenTreeOp node
// comp - the compiler instance
//
// Return Value:
// Return true iff the node is a GT_DIV,GT_UDIV, GT_MOD or GT_UMOD with
// an integer constant and we can perform the division operation using
// a reciprocal multiply or a shift operation.
//
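// Examples (illustrative): a signed "x / 8" qualifies via a power-of-two shift;
// an unsigned "x / 7" qualifies on XARCH/ARM64 via a reciprocal multiply (when
// optimizations are enabled); "x / 0" and signed "x / -1" never qualify because
// they are required to throw.
//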
bool GenTreeOp::UsesDivideByConstOptimized(Compiler* comp)
{
if (!comp->opts.OptimizationEnabled())
{
return false;
}
if (!OperIs(GT_DIV, GT_MOD, GT_UDIV, GT_UMOD))
{
return false;
}
#if defined(TARGET_ARM64)
if (OperIs(GT_MOD, GT_UMOD))
{
// MOD, UMOD not supported for ARM64
return false;
}
#endif // TARGET_ARM64
bool isSignedDivide = OperIs(GT_DIV, GT_MOD);
GenTree* dividend = gtGetOp1()->gtEffectiveVal(/*commaOnly*/ true);
GenTree* divisor = gtGetOp2()->gtEffectiveVal(/*commaOnly*/ true);
#if !defined(TARGET_64BIT)
if (dividend->OperIs(GT_LONG))
{
return false;
}
#endif
if (dividend->IsCnsIntOrI())
{
// We shouldn't see a divmod with constant operands here but if we do then it's likely
// because optimizations are disabled or it's a case that's supposed to throw an exception.
// Don't optimize this.
return false;
}
ssize_t divisorValue;
if (divisor->IsCnsIntOrI())
{
divisorValue = static_cast<ssize_t>(divisor->AsIntCon()->IconValue());
}
else
{
ValueNum vn = divisor->gtVNPair.GetLiberal();
if (comp->vnStore->IsVNConstant(vn))
{
divisorValue = comp->vnStore->CoercedConstantValue<ssize_t>(vn);
}
else
{
return false;
}
}
const var_types divType = TypeGet();
if (divisorValue == 0)
{
// x / 0 and x % 0 can't be optimized because they are required to throw an exception.
return false;
}
else if (isSignedDivide)
{
if (divisorValue == -1)
{
// x / -1 can't be optimized because INT_MIN / -1 is required to throw an exception.
return false;
}
else if (isPow2(divisorValue))
{
return true;
}
}
else // unsigned divide
{
if (divType == TYP_INT)
{
// Clear up the upper 32 bits of the value, they may be set to 1 because constants
// are treated as signed and stored in ssize_t which is 64 bit in size on 64 bit targets.
divisorValue &= UINT32_MAX;
}
size_t unsignedDivisorValue = (size_t)divisorValue;
if (isPow2(unsignedDivisorValue))
{
return true;
}
}
const bool isDiv = OperIs(GT_DIV, GT_UDIV);
if (isDiv)
{
if (isSignedDivide)
{
// If the divisor is the minimum representable integer value then the result is either 0 or 1
if ((divType == TYP_INT && divisorValue == INT_MIN) || (divType == TYP_LONG && divisorValue == INT64_MIN))
{
return true;
}
}
else
{
// If the divisor is greater than or equal to 2^(N - 1) then the result is either 0 or 1
if (((divType == TYP_INT) && ((UINT32)divisorValue > (UINT32_MAX / 2))) ||
((divType == TYP_LONG) && ((UINT64)divisorValue > (UINT64_MAX / 2))))
{
return true;
}
}
}
// TODO-ARM-CQ: Currently there's no GT_MULHI for ARM32
#if defined(TARGET_XARCH) || defined(TARGET_ARM64)
if (!comp->opts.MinOpts() && ((divisorValue >= 3) || !isSignedDivide))
{
// All checks pass we can perform the division operation using a reciprocal multiply.
return true;
}
#endif
return false;
}
//------------------------------------------------------------------------
// CheckDivideByConstOptimized:
// Checks if we can use the division by constant optimization
// on this node
// and if so sets the flag GTF_DIV_BY_CNS_OPT and
// set GTF_DONT_CSE on the constant node
//
// Arguments:
// this - a GenTreeOp node
// comp - the compiler instance
//
void GenTreeOp::CheckDivideByConstOptimized(Compiler* comp)
{
if (UsesDivideByConstOptimized(comp))
{
gtFlags |= GTF_DIV_BY_CNS_OPT;
// Now set DONT_CSE on the GT_CNS_INT divisor; note that
// with value numbering we can have a non-GT_CNS_INT divisor
GenTree* divisor = gtGetOp2()->gtEffectiveVal(/*commaOnly*/ true);
if (divisor->OperIs(GT_CNS_INT))
{
divisor->gtFlags |= GTF_DONT_CSE;
}
}
}
//
//------------------------------------------------------------------------
// gtBlockOpInit: Initializes a BlkOp GenTree
//
// Arguments:
// result - an assignment node that is to be initialized.
// dst - the target (destination) we want to either initialize or copy to.
// src - the init value for InitBlk or the source struct for CpBlk/CpObj.
// isVolatile - specifies whether this node is a volatile memory operation.
//
// Assumptions:
// 'result' is an assignment that is newly constructed.
// If 'dst' is TYP_STRUCT, then it must be a block node or lclVar.
//
// Notes:
// This procedure centralizes all the logic to both enforce proper structure and
// to properly construct any InitBlk/CpBlk node.
void Compiler::gtBlockOpInit(GenTree* result, GenTree* dst, GenTree* srcOrFillVal, bool isVolatile)
{
if (!result->OperIsBlkOp())
{
assert(dst->TypeGet() != TYP_STRUCT);
return;
}
/* In the case of CpBlk, we want to avoid generating
* nodes where the source and destination are the same
* for two reasons: first, such a copy is useless; second,
* it introduces issues in liveness, and copying memory
* from an overlapping memory location is undefined
* behavior both per the ECMA standard and per the
* memcpy semantics.
*
* NOTE: In this case we'll only detect the case for addr of a local
* and a local itself, any other complex expressions won't be
* caught.
*
* TODO-Cleanup: though having this logic is goodness (i.e. avoids self-assignment
* of struct vars very early), it was added because fgInterBlockLocalVarLiveness()
* isn't handling self-assignment of struct variables correctly. This issue may not
* surface if struct promotion is ON (which is the case on x86/arm). But still the
* fundamental issue exists that needs to be addressed.
*/
if (result->OperIsCopyBlkOp())
{
GenTree* currSrc = srcOrFillVal;
GenTree* currDst = dst;
if (currSrc->OperIsBlk() && (currSrc->AsBlk()->Addr()->OperGet() == GT_ADDR))
{
currSrc = currSrc->AsBlk()->Addr()->gtGetOp1();
}
if (currDst->OperIsBlk() && (currDst->AsBlk()->Addr()->OperGet() == GT_ADDR))
{
currDst = currDst->AsBlk()->Addr()->gtGetOp1();
}
if (currSrc->OperGet() == GT_LCL_VAR && currDst->OperGet() == GT_LCL_VAR &&
currSrc->AsLclVarCommon()->GetLclNum() == currDst->AsLclVarCommon()->GetLclNum())
{
// Make this a NOP
// TODO-Cleanup: probably doesn't matter, but could do this earlier and avoid creating a GT_ASG
result->gtBashToNOP();
return;
}
}
// Propagate all effect flags from children
result->gtFlags |= dst->gtFlags & GTF_ALL_EFFECT;
result->gtFlags |= result->AsOp()->gtOp2->gtFlags & GTF_ALL_EFFECT;
result->gtFlags |= (dst->gtFlags & GTF_EXCEPT) | (srcOrFillVal->gtFlags & GTF_EXCEPT);
if (isVolatile)
{
result->gtFlags |= GTF_BLK_VOLATILE;
}
#ifdef FEATURE_SIMD
if (result->OperIsCopyBlkOp() && varTypeIsSIMD(srcOrFillVal))
{
// If the source is a GT_SIMD node of SIMD type, then the dst lclvar struct
// should be labeled as a SIMD-intrinsic-related struct.
// This is done so that the morpher can transform any field accesses into
// intrinsics, thus avoiding conflicting access methods (fields vs. whole-register).
GenTree* src = srcOrFillVal;
if (src->OperIsIndir() && (src->AsIndir()->Addr()->OperGet() == GT_ADDR))
{
src = src->AsIndir()->Addr()->gtGetOp1();
}
#ifdef FEATURE_HW_INTRINSICS
if ((src->OperGet() == GT_SIMD) || (src->OperGet() == GT_HWINTRINSIC))
#else
if (src->OperGet() == GT_SIMD)
#endif // FEATURE_HW_INTRINSICS
{
if (dst->OperIsBlk() && (dst->AsIndir()->Addr()->OperGet() == GT_ADDR))
{
dst = dst->AsIndir()->Addr()->gtGetOp1();
}
if (dst->OperIsLocal() && varTypeIsStruct(dst))
{
setLclRelatedToSIMDIntrinsic(dst);
}
}
}
#endif // FEATURE_SIMD
}
//------------------------------------------------------------------------
// gtNewBlkOpNode: Creates a GenTree for a block (struct) assignment.
//
// Arguments:
// dst - The destination node: local var / block node.
// srcOrFillVal - The value to assign for CopyBlk, the integer "fill" for InitBlk
// isVolatile - Whether this is a volatile memory operation or not.
// isCopyBlock - True if this is a block copy (rather than a block init).
//
// Return Value:
// Returns the newly constructed and initialized block operation.
//
GenTree* Compiler::gtNewBlkOpNode(GenTree* dst, GenTree* srcOrFillVal, bool isVolatile, bool isCopyBlock)
{
assert(dst->OperIsBlk() || dst->OperIsLocal());
if (isCopyBlock)
{
if (srcOrFillVal->OperIsIndir() && (srcOrFillVal->gtGetOp1()->gtOper == GT_ADDR))
{
srcOrFillVal = srcOrFillVal->gtGetOp1()->gtGetOp1();
}
}
else
{
// InitBlk
assert(varTypeIsIntegral(srcOrFillVal));
if (varTypeIsStruct(dst))
{
if (!srcOrFillVal->IsIntegralConst(0))
{
srcOrFillVal = gtNewOperNode(GT_INIT_VAL, TYP_INT, srcOrFillVal);
}
}
}
GenTree* result = gtNewAssignNode(dst, srcOrFillVal);
gtBlockOpInit(result, dst, srcOrFillVal, isVolatile);
return result;
}
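// Illustrative sketch (hypothetical caller; dstLclNum, srcLclNum and structHnd are assumed
// to describe two struct locals of the same type): building a struct-to-struct copy whose
// flags are then finished off by gtBlockOpInit above:
//
//   GenTree* dstAddr = gtNewOperNode(GT_ADDR, TYP_BYREF, gtNewLclvNode(dstLclNum, TYP_STRUCT));
//   GenTree* srcAddr = gtNewOperNode(GT_ADDR, TYP_BYREF, gtNewLclvNode(srcLclNum, TYP_STRUCT));
//   GenTree* asg     = gtNewBlkOpNode(gtNewObjNode(structHnd, dstAddr), gtNewObjNode(structHnd, srcAddr),
//                                     /* isVolatile */ false, /* isCopyBlock */ true);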
//------------------------------------------------------------------------
// gtNewPutArgReg: Creates a new PutArgReg node.
//
// Arguments:
// type - The actual type of the argument
// arg - The argument node
// argReg - The register that the argument will be passed in
//
// Return Value:
// Returns the newly created PutArgReg node.
//
// Notes:
// The node is generated as GenTreeMultiRegOp on RyuJIT/armel, GenTreeOp on all the other archs.
//
GenTree* Compiler::gtNewPutArgReg(var_types type, GenTree* arg, regNumber argReg)
{
assert(arg != nullptr);
GenTree* node = nullptr;
#if defined(TARGET_ARM)
// A PUTARG_REG could be a MultiRegOp on arm since we could move a double register to two int registers.
node = new (this, GT_PUTARG_REG) GenTreeMultiRegOp(GT_PUTARG_REG, type, arg, nullptr);
if (type == TYP_LONG)
{
node->AsMultiRegOp()->gtOtherReg = REG_NEXT(argReg);
}
#else
node = gtNewOperNode(GT_PUTARG_REG, type, arg);
#endif
node->SetRegNum(argReg);
return node;
}
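// Illustrative sketch (hypothetical caller, e.g. while lowering call arguments; REG_ARG_0
// is assumed to name the target's first integer argument register): wrapping an argument so
// that codegen materializes it in that register:
//
//   GenTree* putArg = gtNewPutArgReg(TYP_INT, argNode, REG_ARG_0);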
//------------------------------------------------------------------------
// gtNewBitCastNode: Creates a new BitCast node.
//
// Arguments:
// type - The actual type of the argument
// arg - The argument node
// argReg - The register that the argument will be passed in
//
// Return Value:
// Returns the newly created BitCast node.
//
// Notes:
// The node is generated as GenTreeMultiRegOp on RyuJIT/arm, as GenTreeOp on all the other archs.
//
GenTree* Compiler::gtNewBitCastNode(var_types type, GenTree* arg)
{
assert(arg != nullptr);
assert(type != TYP_STRUCT);
GenTree* node = nullptr;
#if defined(TARGET_ARM)
// A BITCAST could be a MultiRegOp on arm since we could move a double register to two int registers.
node = new (this, GT_BITCAST) GenTreeMultiRegOp(GT_BITCAST, type, arg, nullptr);
#else
node = gtNewOperNode(GT_BITCAST, type, arg);
#endif
return node;
}
//------------------------------------------------------------------------
// gtNewAllocObjNode: Helper to create an object allocation node.
//
// Arguments:
// pResolvedToken - Resolved token for the object being allocated
// useParent - true iff the token represents a child of the object's class
//
// Return Value:
// Returns GT_ALLOCOBJ node that will be later morphed into an
// allocation helper call or local variable allocation on the stack.
//
// Node creation can fail for inlinees when the type described by pResolvedToken
// can't be represented in jitted code. If this happens, this method will return
// nullptr.
//
GenTreeAllocObj* Compiler::gtNewAllocObjNode(CORINFO_RESOLVED_TOKEN* pResolvedToken, bool useParent)
{
const bool mustRestoreHandle = true;
bool* const pRuntimeLookup = nullptr;
bool usingReadyToRunHelper = false;
CorInfoHelpFunc helper = CORINFO_HELP_UNDEF;
GenTree* opHandle = impTokenToHandle(pResolvedToken, pRuntimeLookup, mustRestoreHandle, useParent);
#ifdef FEATURE_READYTORUN
CORINFO_CONST_LOOKUP lookup = {};
if (opts.IsReadyToRun())
{
helper = CORINFO_HELP_READYTORUN_NEW;
CORINFO_LOOKUP_KIND* const pGenericLookupKind = nullptr;
usingReadyToRunHelper =
info.compCompHnd->getReadyToRunHelper(pResolvedToken, pGenericLookupKind, helper, &lookup);
}
#endif
if (!usingReadyToRunHelper)
{
if (opHandle == nullptr)
{
// We must be backing out of an inline.
assert(compDonotInline());
return nullptr;
}
}
bool helperHasSideEffects;
CorInfoHelpFunc helperTemp =
info.compCompHnd->getNewHelper(pResolvedToken, info.compMethodHnd, &helperHasSideEffects);
if (!usingReadyToRunHelper)
{
helper = helperTemp;
}
// TODO: ReadyToRun: When generic dictionary lookups are necessary, replace the lookup call
// and the newfast call with a single call to a dynamic R2R cell that will:
// 1) Load the context
// 2) Perform the generic dictionary lookup and caching, and generate the appropriate stub
// 3) Allocate and return the new object for boxing
// Reason: performance (today, we'll always use the slow helper for the R2R generics case)
GenTreeAllocObj* allocObj =
gtNewAllocObjNode(helper, helperHasSideEffects, pResolvedToken->hClass, TYP_REF, opHandle);
#ifdef FEATURE_READYTORUN
if (usingReadyToRunHelper)
{
assert(lookup.addr != nullptr);
allocObj->gtEntryPoint = lookup;
}
#endif
return allocObj;
}
/*****************************************************************************
*
* Clones the given tree value and returns a copy of the given tree.
* If 'complexOK' is false, the cloning is only done provided the tree
* is not too complex (whatever that may mean);
* If 'complexOK' is true, we try slightly harder to clone the tree.
* In either case, NULL is returned if the tree cannot be cloned
*
* Note that there is the function gtCloneExpr() which does a more
* complete job if you can't handle this function failing.
*/
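// Illustrative examples: gtClone on a constant or local variable node always succeeds;
// with 'complexOK' it may also handle a GT_FIELD, a GT_ADD/GT_SUB of two leaves, or a
// GT_ADDR of a clonable operand; anything more complex returns nullptr, in which case
// callers typically fall back to gtCloneExpr.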
GenTree* Compiler::gtClone(GenTree* tree, bool complexOK)
{
GenTree* copy;
switch (tree->gtOper)
{
case GT_CNS_INT:
#if defined(LATE_DISASM)
if (tree->IsIconHandle())
{
copy = gtNewIconHandleNode(tree->AsIntCon()->gtIconVal, tree->gtFlags, tree->AsIntCon()->gtFieldSeq);
copy->AsIntCon()->gtCompileTimeHandle = tree->AsIntCon()->gtCompileTimeHandle;
copy->gtType = tree->gtType;
}
else
#endif
{
copy = new (this, GT_CNS_INT)
GenTreeIntCon(tree->gtType, tree->AsIntCon()->gtIconVal, tree->AsIntCon()->gtFieldSeq);
copy->AsIntCon()->gtCompileTimeHandle = tree->AsIntCon()->gtCompileTimeHandle;
}
break;
case GT_CNS_LNG:
copy = gtNewLconNode(tree->AsLngCon()->gtLconVal);
break;
case GT_LCL_VAR:
// Remember that the LclVar node has been cloned. The flag will be set
// on 'copy' as well.
tree->gtFlags |= GTF_VAR_CLONED;
copy = gtNewLclvNode(tree->AsLclVarCommon()->GetLclNum(),
tree->gtType DEBUGARG(tree->AsLclVar()->gtLclILoffs));
break;
case GT_LCL_FLD:
case GT_LCL_FLD_ADDR:
// Remember that the LclVar node has been cloned. The flag will be set
// on 'copy' as well.
tree->gtFlags |= GTF_VAR_CLONED;
copy = new (this, tree->OperGet())
GenTreeLclFld(tree->OperGet(), tree->TypeGet(), tree->AsLclFld()->GetLclNum(),
tree->AsLclFld()->GetLclOffs());
copy->AsLclFld()->SetFieldSeq(tree->AsLclFld()->GetFieldSeq());
break;
case GT_CLS_VAR:
copy = new (this, GT_CLS_VAR)
GenTreeClsVar(tree->gtType, tree->AsClsVar()->gtClsVarHnd, tree->AsClsVar()->gtFieldSeq);
break;
default:
if (!complexOK)
{
return nullptr;
}
if (tree->gtOper == GT_FIELD)
{
GenTree* objp = nullptr;
if (tree->AsField()->GetFldObj() != nullptr)
{
objp = gtClone(tree->AsField()->GetFldObj(), false);
if (objp == nullptr)
{
return nullptr;
}
}
copy = gtNewFieldRef(tree->TypeGet(), tree->AsField()->gtFldHnd, objp, tree->AsField()->gtFldOffset);
copy->AsField()->gtFldMayOverlap = tree->AsField()->gtFldMayOverlap;
#ifdef FEATURE_READYTORUN
copy->AsField()->gtFieldLookup = tree->AsField()->gtFieldLookup;
#endif
}
else if (tree->OperIs(GT_ADD, GT_SUB))
{
GenTree* op1 = tree->AsOp()->gtOp1;
GenTree* op2 = tree->AsOp()->gtOp2;
if (op1->OperIsLeaf() && op2->OperIsLeaf())
{
op1 = gtClone(op1);
if (op1 == nullptr)
{
return nullptr;
}
op2 = gtClone(op2);
if (op2 == nullptr)
{
return nullptr;
}
copy = gtNewOperNode(tree->OperGet(), tree->TypeGet(), op1, op2);
}
else
{
return nullptr;
}
}
else if (tree->gtOper == GT_ADDR)
{
GenTree* op1 = gtClone(tree->AsOp()->gtOp1);
if (op1 == nullptr)
{
return nullptr;
}
copy = gtNewOperNode(GT_ADDR, tree->TypeGet(), op1);
}
else
{
return nullptr;
}
break;
}
copy->gtFlags |= tree->gtFlags & ~GTF_NODE_MASK;
#if defined(DEBUG)
copy->gtDebugFlags |= tree->gtDebugFlags & ~GTF_DEBUG_NODE_MASK;
#endif // defined(DEBUG)
return copy;
}
//------------------------------------------------------------------------
// gtCloneExpr: Create a copy of `tree`, adding flags `addFlags`, mapping
// local `varNum` to int constant `varVal` if it appears at
// the root, and mapping uses of local `deepVarNum` to constant
// `deepVarVal` if they occur beyond the root.
//
// Arguments:
// tree - GenTree to create a copy of
// addFlags - GTF_* flags to add to the copied tree nodes
// varNum - lclNum to replace at the root, or ~0 for no root replacement
// varVal - If replacing at root, replace local `varNum` with IntCns `varVal`
// deepVarNum - lclNum to replace uses of beyond the root, or ~0 for no replacement
// deepVarVal - If replacing beyond root, replace `deepVarNum` with IntCns `deepVarVal`
//
// Return Value:
// A copy of the given tree with the replacements and added flags specified.
//
// Notes:
// Top-level callers should generally call the overload that doesn't have
// the explicit `deepVarNum` and `deepVarVal` parameters; those are used in
// recursive invocations to avoid replacing defs.
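// For example (illustrative): calling the overload without the deep parameters on the
// tree "V03 + 8" with varNum = 3 and varVal = 5 yields the copy "5 + 8"; uses of V03
// that appear as the LHS of an assignment are deliberately left unreplaced.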
GenTree* Compiler::gtCloneExpr(
GenTree* tree, GenTreeFlags addFlags, unsigned varNum, int varVal, unsigned deepVarNum, int deepVarVal)
{
if (tree == nullptr)
{
return nullptr;
}
/* Figure out what kind of a node we have */
genTreeOps oper = tree->OperGet();
unsigned kind = tree->OperKind();
GenTree* copy;
/* Is this a leaf node? */
if (kind & GTK_LEAF)
{
switch (oper)
{
case GT_CNS_INT:
#if defined(LATE_DISASM)
if (tree->IsIconHandle())
{
copy =
gtNewIconHandleNode(tree->AsIntCon()->gtIconVal, tree->gtFlags, tree->AsIntCon()->gtFieldSeq);
copy->AsIntCon()->gtCompileTimeHandle = tree->AsIntCon()->gtCompileTimeHandle;
copy->gtType = tree->gtType;
}
else
#endif
{
copy = gtNewIconNode(tree->AsIntCon()->gtIconVal, tree->gtType);
#ifdef DEBUG
copy->AsIntCon()->gtTargetHandle = tree->AsIntCon()->gtTargetHandle;
#endif
copy->AsIntCon()->gtCompileTimeHandle = tree->AsIntCon()->gtCompileTimeHandle;
copy->AsIntCon()->gtFieldSeq = tree->AsIntCon()->gtFieldSeq;
}
goto DONE;
case GT_CNS_LNG:
copy = gtNewLconNode(tree->AsLngCon()->gtLconVal);
goto DONE;
case GT_CNS_DBL:
copy = gtNewDconNode(tree->AsDblCon()->gtDconVal);
copy->gtType = tree->gtType; // keep the same type
goto DONE;
case GT_CNS_STR:
copy = gtNewSconNode(tree->AsStrCon()->gtSconCPX, tree->AsStrCon()->gtScpHnd);
goto DONE;
case GT_LCL_VAR:
if (tree->AsLclVarCommon()->GetLclNum() == varNum)
{
copy = gtNewIconNode(varVal, tree->gtType);
if (tree->gtFlags & GTF_VAR_ARR_INDEX)
{
copy->LabelIndex(this);
}
}
else
{
// Remember that the LclVar node has been cloned. The flag will
// be set on 'copy' as well.
tree->gtFlags |= GTF_VAR_CLONED;
copy = gtNewLclvNode(tree->AsLclVar()->GetLclNum(),
tree->gtType DEBUGARG(tree->AsLclVar()->gtLclILoffs));
copy->AsLclVarCommon()->SetSsaNum(tree->AsLclVarCommon()->GetSsaNum());
}
goto DONE;
case GT_LCL_FLD:
if (tree->AsLclFld()->GetLclNum() == varNum)
{
IMPL_LIMITATION("replacing GT_LCL_FLD with a constant");
}
else
{
// Remember that the LclVar node has been cloned. The flag will
// be set on 'copy' as well.
tree->gtFlags |= GTF_VAR_CLONED;
copy =
new (this, GT_LCL_FLD) GenTreeLclFld(GT_LCL_FLD, tree->TypeGet(), tree->AsLclFld()->GetLclNum(),
tree->AsLclFld()->GetLclOffs());
copy->AsLclFld()->SetFieldSeq(tree->AsLclFld()->GetFieldSeq());
copy->gtFlags = tree->gtFlags;
}
goto DONE;
case GT_CLS_VAR:
copy = new (this, GT_CLS_VAR)
GenTreeClsVar(tree->TypeGet(), tree->AsClsVar()->gtClsVarHnd, tree->AsClsVar()->gtFieldSeq);
goto DONE;
case GT_RET_EXPR:
// GT_RET_EXPR is a unique node that contains a link to a gtInlineCandidate node
// that is part of another statement. We cannot clone both here and cannot
// create another GT_RET_EXPR that points to the same gtInlineCandidate.
NO_WAY("Cloning of GT_RET_EXPR node not supported");
goto DONE;
case GT_MEMORYBARRIER:
copy = new (this, GT_MEMORYBARRIER) GenTree(GT_MEMORYBARRIER, TYP_VOID);
goto DONE;
case GT_ARGPLACE:
copy = gtNewArgPlaceHolderNode(tree->gtType, tree->AsArgPlace()->gtArgPlaceClsHnd);
goto DONE;
case GT_FTN_ADDR:
copy = new (this, oper) GenTreeFptrVal(tree->gtType, tree->AsFptrVal()->gtFptrMethod);
#ifdef FEATURE_READYTORUN
copy->AsFptrVal()->gtEntryPoint = tree->AsFptrVal()->gtEntryPoint;
#endif
goto DONE;
case GT_CATCH_ARG:
case GT_NO_OP:
case GT_LABEL:
copy = new (this, oper) GenTree(oper, tree->gtType);
goto DONE;
#if !defined(FEATURE_EH_FUNCLETS)
case GT_END_LFIN:
#endif // !FEATURE_EH_FUNCLETS
case GT_JMP:
copy = new (this, oper) GenTreeVal(oper, tree->gtType, tree->AsVal()->gtVal1);
goto DONE;
case GT_LCL_VAR_ADDR:
copy = new (this, oper) GenTreeLclVar(oper, tree->TypeGet(), tree->AsLclVar()->GetLclNum());
goto DONE;
case GT_LCL_FLD_ADDR:
copy = new (this, oper)
GenTreeLclFld(oper, tree->TypeGet(), tree->AsLclFld()->GetLclNum(), tree->AsLclFld()->GetLclOffs());
copy->AsLclFld()->SetFieldSeq(tree->AsLclFld()->GetFieldSeq());
goto DONE;
default:
NO_WAY("Cloning of node not supported");
goto DONE;
}
}
/* Is it a 'simple' unary/binary operator? */
if (kind & GTK_SMPOP)
{
/* If necessary, make sure we allocate a "fat" tree node */
CLANG_FORMAT_COMMENT_ANCHOR;
switch (oper)
{
/* These nodes sometimes get bashed to "fat" ones */
case GT_MUL:
case GT_DIV:
case GT_MOD:
case GT_UDIV:
case GT_UMOD:
// In the implementation of gtNewLargeOperNode you have
// to give an oper that will create a small node,
// otherwise it asserts.
//
if (GenTree::s_gtNodeSizes[oper] == TREE_NODE_SZ_SMALL)
{
copy = gtNewLargeOperNode(oper, tree->TypeGet(), tree->AsOp()->gtOp1,
tree->OperIsBinary() ? tree->AsOp()->gtOp2 : nullptr);
}
else // Always a large tree
{
if (tree->OperIsBinary())
{
copy = gtNewOperNode(oper, tree->TypeGet(), tree->AsOp()->gtOp1, tree->AsOp()->gtOp2);
}
else
{
copy = gtNewOperNode(oper, tree->TypeGet(), tree->AsOp()->gtOp1);
}
}
break;
case GT_CAST:
copy = new (this, LargeOpOpcode())
GenTreeCast(tree->TypeGet(), tree->AsCast()->CastOp(), tree->IsUnsigned(),
tree->AsCast()->gtCastType DEBUGARG(/*largeNode*/ TRUE));
break;
case GT_INDEX:
{
GenTreeIndex* asInd = tree->AsIndex();
copy = new (this, GT_INDEX)
GenTreeIndex(asInd->TypeGet(), asInd->Arr(), asInd->Index(), asInd->gtIndElemSize);
copy->AsIndex()->gtStructElemClass = asInd->gtStructElemClass;
}
break;
case GT_INDEX_ADDR:
{
GenTreeIndexAddr* asIndAddr = tree->AsIndexAddr();
copy = new (this, GT_INDEX_ADDR)
GenTreeIndexAddr(asIndAddr->Arr(), asIndAddr->Index(), asIndAddr->gtElemType,
asIndAddr->gtStructElemClass, asIndAddr->gtElemSize, asIndAddr->gtLenOffset,
asIndAddr->gtElemOffset);
copy->AsIndexAddr()->gtIndRngFailBB = asIndAddr->gtIndRngFailBB;
}
break;
case GT_ALLOCOBJ:
{
GenTreeAllocObj* asAllocObj = tree->AsAllocObj();
copy = new (this, GT_ALLOCOBJ)
GenTreeAllocObj(tree->TypeGet(), asAllocObj->gtNewHelper, asAllocObj->gtHelperHasSideEffects,
asAllocObj->gtAllocObjClsHnd, asAllocObj->gtOp1);
#ifdef FEATURE_READYTORUN
copy->AsAllocObj()->gtEntryPoint = asAllocObj->gtEntryPoint;
#endif
}
break;
case GT_RUNTIMELOOKUP:
{
GenTreeRuntimeLookup* asRuntimeLookup = tree->AsRuntimeLookup();
copy = new (this, GT_RUNTIMELOOKUP)
GenTreeRuntimeLookup(asRuntimeLookup->gtHnd, asRuntimeLookup->gtHndType, asRuntimeLookup->gtOp1);
}
break;
case GT_ARR_LENGTH:
copy = gtNewArrLen(tree->TypeGet(), tree->AsOp()->gtOp1, tree->AsArrLen()->ArrLenOffset(), nullptr);
break;
case GT_ARR_INDEX:
copy = new (this, GT_ARR_INDEX)
GenTreeArrIndex(tree->TypeGet(),
gtCloneExpr(tree->AsArrIndex()->ArrObj(), addFlags, deepVarNum, deepVarVal),
gtCloneExpr(tree->AsArrIndex()->IndexExpr(), addFlags, deepVarNum, deepVarVal),
tree->AsArrIndex()->gtCurrDim, tree->AsArrIndex()->gtArrRank,
tree->AsArrIndex()->gtArrElemType);
break;
case GT_QMARK:
copy = new (this, GT_QMARK)
GenTreeQmark(tree->TypeGet(), tree->AsOp()->gtGetOp1(), tree->AsOp()->gtGetOp2()->AsColon());
break;
case GT_OBJ:
copy =
new (this, GT_OBJ) GenTreeObj(tree->TypeGet(), tree->AsObj()->Addr(), tree->AsObj()->GetLayout());
break;
case GT_BLK:
copy = new (this, GT_BLK)
GenTreeBlk(GT_BLK, tree->TypeGet(), tree->AsBlk()->Addr(), tree->AsBlk()->GetLayout());
break;
case GT_FIELD:
copy = new (this, GT_FIELD) GenTreeField(tree->TypeGet(), tree->AsField()->GetFldObj(),
tree->AsField()->gtFldHnd, tree->AsField()->gtFldOffset);
copy->AsField()->gtFldMayOverlap = tree->AsField()->gtFldMayOverlap;
#ifdef FEATURE_READYTORUN
copy->AsField()->gtFieldLookup = tree->AsField()->gtFieldLookup;
#endif
break;
case GT_BOX:
copy = new (this, GT_BOX)
GenTreeBox(tree->TypeGet(), tree->AsOp()->gtOp1, tree->AsBox()->gtAsgStmtWhenInlinedBoxValue,
tree->AsBox()->gtCopyStmtWhenInlinedBoxValue);
break;
case GT_INTRINSIC:
copy = new (this, GT_INTRINSIC)
GenTreeIntrinsic(tree->TypeGet(), tree->AsOp()->gtOp1, tree->AsOp()->gtOp2,
tree->AsIntrinsic()->gtIntrinsicName, tree->AsIntrinsic()->gtMethodHandle);
#ifdef FEATURE_READYTORUN
copy->AsIntrinsic()->gtEntryPoint = tree->AsIntrinsic()->gtEntryPoint;
#endif
break;
case GT_BOUNDS_CHECK:
copy = new (this, GT_BOUNDS_CHECK)
GenTreeBoundsChk(tree->AsBoundsChk()->GetIndex(), tree->AsBoundsChk()->GetArrayLength(),
tree->AsBoundsChk()->gtThrowKind);
copy->AsBoundsChk()->gtIndRngFailBB = tree->AsBoundsChk()->gtIndRngFailBB;
break;
case GT_LEA:
{
GenTreeAddrMode* addrModeOp = tree->AsAddrMode();
copy = new (this, GT_LEA)
GenTreeAddrMode(addrModeOp->TypeGet(), addrModeOp->Base(), addrModeOp->Index(), addrModeOp->gtScale,
static_cast<unsigned>(addrModeOp->Offset()));
}
break;
case GT_COPY:
case GT_RELOAD:
{
copy = new (this, oper) GenTreeCopyOrReload(oper, tree->TypeGet(), tree->gtGetOp1());
}
break;
default:
assert(!GenTree::IsExOp(tree->OperKind()) && tree->OperIsSimple());
// We're in the SimpleOp case, so it's always unary or binary.
if (GenTree::OperIsUnary(tree->OperGet()))
{
copy = gtNewOperNode(oper, tree->TypeGet(), tree->AsOp()->gtOp1, /*doSimplifications*/ false);
}
else
{
assert(GenTree::OperIsBinary(tree->OperGet()));
copy = gtNewOperNode(oper, tree->TypeGet(), tree->AsOp()->gtOp1, tree->AsOp()->gtOp2);
}
break;
}
// Some flags are conceptually part of the gtOper, and should be copied immediately.
if (tree->gtOverflowEx())
{
copy->gtFlags |= GTF_OVERFLOW;
}
if (tree->AsOp()->gtOp1)
{
if (tree->gtOper == GT_ASG)
{
// Don't replace varNum if it appears as the LHS of an assign.
copy->AsOp()->gtOp1 = gtCloneExpr(tree->AsOp()->gtOp1, addFlags, -1, 0, deepVarNum, deepVarVal);
}
else
{
copy->AsOp()->gtOp1 = gtCloneExpr(tree->AsOp()->gtOp1, addFlags, deepVarNum, deepVarVal);
}
}
if (tree->gtGetOp2IfPresent())
{
copy->AsOp()->gtOp2 = gtCloneExpr(tree->AsOp()->gtOp2, addFlags, deepVarNum, deepVarVal);
}
/* Flags */
addFlags |= tree->gtFlags;
// Copy any node annotations, if necessary.
switch (tree->gtOper)
{
case GT_STOREIND:
case GT_IND:
case GT_OBJ:
case GT_STORE_OBJ:
{
ArrayInfo arrInfo;
if (!tree->AsIndir()->gtOp1->OperIs(GT_INDEX_ADDR) && TryGetArrayInfo(tree->AsIndir(), &arrInfo))
{
GetArrayInfoMap()->Set(copy, arrInfo);
}
}
break;
default:
break;
}
#ifdef DEBUG
/* GTF_NODE_MASK should not be propagated from 'tree' to 'copy' */
addFlags &= ~GTF_NODE_MASK;
#endif
// Effects flags propagate upwards.
if (copy->AsOp()->gtOp1 != nullptr)
{
copy->gtFlags |= (copy->AsOp()->gtOp1->gtFlags & GTF_ALL_EFFECT);
}
if (copy->gtGetOp2IfPresent() != nullptr)
{
copy->gtFlags |= (copy->gtGetOp2()->gtFlags & GTF_ALL_EFFECT);
}
goto DONE;
}
/* See what kind of a special operator we have here */
switch (oper)
{
case GT_CALL:
// We can't safely clone calls that have GT_RET_EXPRs via gtCloneExpr.
// You must use gtCloneCandidateCall for these calls (and then do appropriate other fixup)
if (tree->AsCall()->IsInlineCandidate() || tree->AsCall()->IsGuardedDevirtualizationCandidate())
{
NO_WAY("Cloning of calls with associated GT_RET_EXPR nodes is not supported");
}
copy = gtCloneExprCallHelper(tree->AsCall(), addFlags, deepVarNum, deepVarVal);
break;
#ifdef FEATURE_SIMD
case GT_SIMD:
copy = new (this, GT_SIMD)
GenTreeSIMD(tree->TypeGet(), IntrinsicNodeBuilder(getAllocator(CMK_ASTNode), tree->AsSIMD()),
tree->AsSIMD()->GetSIMDIntrinsicId(), tree->AsSIMD()->GetSimdBaseJitType(),
tree->AsSIMD()->GetSimdSize());
goto CLONE_MULTIOP_OPERANDS;
#endif
#ifdef FEATURE_HW_INTRINSICS
case GT_HWINTRINSIC:
copy = new (this, GT_HWINTRINSIC)
GenTreeHWIntrinsic(tree->TypeGet(), IntrinsicNodeBuilder(getAllocator(CMK_ASTNode), tree->AsMultiOp()),
tree->AsHWIntrinsic()->GetHWIntrinsicId(),
tree->AsHWIntrinsic()->GetSimdBaseJitType(), tree->AsHWIntrinsic()->GetSimdSize(),
tree->AsHWIntrinsic()->IsSimdAsHWIntrinsic());
copy->AsHWIntrinsic()->SetAuxiliaryJitType(tree->AsHWIntrinsic()->GetAuxiliaryJitType());
goto CLONE_MULTIOP_OPERANDS;
#endif
#if defined(FEATURE_SIMD) || defined(FEATURE_HW_INTRINSICS)
CLONE_MULTIOP_OPERANDS:
for (GenTree** use : copy->AsMultiOp()->UseEdges())
{
*use = gtCloneExpr(*use, addFlags, deepVarNum, deepVarVal);
}
break;
#endif
case GT_ARR_ELEM:
{
GenTreeArrElem* arrElem = tree->AsArrElem();
GenTree* inds[GT_ARR_MAX_RANK];
for (unsigned dim = 0; dim < arrElem->gtArrRank; dim++)
{
inds[dim] = gtCloneExpr(arrElem->gtArrInds[dim], addFlags, deepVarNum, deepVarVal);
}
copy = new (this, GT_ARR_ELEM)
GenTreeArrElem(arrElem->TypeGet(), gtCloneExpr(arrElem->gtArrObj, addFlags, deepVarNum, deepVarVal),
arrElem->gtArrRank, arrElem->gtArrElemSize, arrElem->gtArrElemType, &inds[0]);
}
break;
case GT_ARR_OFFSET:
{
copy = new (this, GT_ARR_OFFSET)
GenTreeArrOffs(tree->TypeGet(),
gtCloneExpr(tree->AsArrOffs()->gtOffset, addFlags, deepVarNum, deepVarVal),
gtCloneExpr(tree->AsArrOffs()->gtIndex, addFlags, deepVarNum, deepVarVal),
gtCloneExpr(tree->AsArrOffs()->gtArrObj, addFlags, deepVarNum, deepVarVal),
tree->AsArrOffs()->gtCurrDim, tree->AsArrOffs()->gtArrRank,
tree->AsArrOffs()->gtArrElemType);
}
break;
case GT_PHI:
{
copy = new (this, GT_PHI) GenTreePhi(tree->TypeGet());
GenTreePhi::Use** prevUse = ©->AsPhi()->gtUses;
for (GenTreePhi::Use& use : tree->AsPhi()->Uses())
{
*prevUse = new (this, CMK_ASTNode)
GenTreePhi::Use(gtCloneExpr(use.GetNode(), addFlags, deepVarNum, deepVarVal), *prevUse);
prevUse = &((*prevUse)->NextRef());
}
}
break;
case GT_FIELD_LIST:
copy = new (this, GT_FIELD_LIST) GenTreeFieldList();
for (GenTreeFieldList::Use& use : tree->AsFieldList()->Uses())
{
copy->AsFieldList()->AddField(this, gtCloneExpr(use.GetNode(), addFlags, deepVarNum, deepVarVal),
use.GetOffset(), use.GetType());
}
break;
case GT_CMPXCHG:
copy = new (this, GT_CMPXCHG)
GenTreeCmpXchg(tree->TypeGet(),
gtCloneExpr(tree->AsCmpXchg()->gtOpLocation, addFlags, deepVarNum, deepVarVal),
gtCloneExpr(tree->AsCmpXchg()->gtOpValue, addFlags, deepVarNum, deepVarVal),
gtCloneExpr(tree->AsCmpXchg()->gtOpComparand, addFlags, deepVarNum, deepVarVal));
break;
case GT_STORE_DYN_BLK:
copy = new (this, oper)
GenTreeStoreDynBlk(gtCloneExpr(tree->AsStoreDynBlk()->Addr(), addFlags, deepVarNum, deepVarVal),
gtCloneExpr(tree->AsStoreDynBlk()->Data(), addFlags, deepVarNum, deepVarVal),
gtCloneExpr(tree->AsStoreDynBlk()->gtDynamicSize, addFlags, deepVarNum, deepVarVal));
break;
default:
#ifdef DEBUG
gtDispTree(tree);
#endif
NO_WAY("unexpected operator");
}
DONE:
// If it has a zero-offset field seq, copy annotation.
if (tree->TypeGet() == TYP_BYREF)
{
FieldSeqNode* fldSeq = nullptr;
if (GetZeroOffsetFieldMap()->Lookup(tree, &fldSeq))
{
fgAddFieldSeqForZeroOffset(copy, fldSeq);
}
}
copy->gtVNPair = tree->gtVNPair; // A cloned tree gets the original's Value number pair
/* Compute the flags for the copied node. Note that we can do this only
if we didn't gtFoldExpr(copy) */
if (copy->gtOper == oper)
{
addFlags |= tree->gtFlags;
#ifdef DEBUG
/* GTF_NODE_MASK should not be propagated from 'tree' to 'copy' */
addFlags &= ~GTF_NODE_MASK;
#endif
copy->gtFlags |= addFlags;
// Update side effect flags since they may be different from the source side effect flags.
// For example, we may have replaced some locals with constants and made indirections non-throwing.
gtUpdateNodeSideEffects(copy);
}
/* GTF_COLON_COND should be propagated from 'tree' to 'copy' */
copy->gtFlags |= (tree->gtFlags & GTF_COLON_COND);
#if defined(DEBUG)
// Non-node debug flags should be propagated from 'tree' to 'copy'
copy->gtDebugFlags |= (tree->gtDebugFlags & ~GTF_DEBUG_NODE_MASK);
#endif
/* Make sure to copy back fields that may have been initialized */
copy->CopyRawCosts(tree);
copy->gtRsvdRegs = tree->gtRsvdRegs;
copy->CopyReg(tree);
return copy;
}
//------------------------------------------------------------------------
// gtCloneExprCallHelper: clone a call tree
//
// Notes:
// Do not invoke this method directly, instead call either gtCloneExpr
// or gtCloneCandidateCall, as appropriate.
//
// Arguments:
// tree - the call to clone
// addFlags - GTF_* flags to add to the copied tree nodes
// deepVarNum - lclNum to replace uses of beyond the root, or BAD_VAR_NUM for no replacement
// deepVarVal - If replacing beyond root, replace `deepVarNum` with IntCns `deepVarVal`
//
// Returns:
// Cloned copy of call and all subtrees.
GenTreeCall* Compiler::gtCloneExprCallHelper(GenTreeCall* tree,
GenTreeFlags addFlags,
unsigned deepVarNum,
int deepVarVal)
{
GenTreeCall* copy = new (this, GT_CALL) GenTreeCall(tree->TypeGet());
if (tree->gtCallThisArg == nullptr)
{
copy->gtCallThisArg = nullptr;
}
else
{
copy->gtCallThisArg =
gtNewCallArgs(gtCloneExpr(tree->gtCallThisArg->GetNode(), addFlags, deepVarNum, deepVarVal));
}
copy->gtCallMoreFlags = tree->gtCallMoreFlags;
copy->gtCallArgs = nullptr;
copy->gtCallLateArgs = nullptr;
GenTreeCall::Use** argsTail = ©->gtCallArgs;
for (GenTreeCall::Use& use : tree->Args())
{
*argsTail = gtNewCallArgs(gtCloneExpr(use.GetNode(), addFlags, deepVarNum, deepVarVal));
argsTail = &((*argsTail)->NextRef());
}
argsTail = ©->gtCallLateArgs;
for (GenTreeCall::Use& use : tree->LateArgs())
{
*argsTail = gtNewCallArgs(gtCloneExpr(use.GetNode(), addFlags, deepVarNum, deepVarVal));
argsTail = &((*argsTail)->NextRef());
}
// The call sig comes from the EE and doesn't change throughout the compilation process, meaning
// we only really need one physical copy of it. Therefore a shallow pointer copy will suffice.
// (Note that this still holds even if the tree we are cloning was created by an inlinee compiler,
// because the inlinee still uses the inliner's memory allocator anyway.)
INDEBUG(copy->callSig = tree->callSig;)
// The tail call info does not change after it is allocated, so for the same reasons as above
// a shallow copy suffices.
copy->tailCallInfo = tree->tailCallInfo;
copy->gtRetClsHnd = tree->gtRetClsHnd;
copy->gtControlExpr = gtCloneExpr(tree->gtControlExpr, addFlags, deepVarNum, deepVarVal);
copy->gtStubCallStubAddr = tree->gtStubCallStubAddr;
/* Copy the union */
if (tree->gtCallType == CT_INDIRECT)
{
copy->gtCallCookie =
tree->gtCallCookie ? gtCloneExpr(tree->gtCallCookie, addFlags, deepVarNum, deepVarVal) : nullptr;
copy->gtCallAddr = tree->gtCallAddr ? gtCloneExpr(tree->gtCallAddr, addFlags, deepVarNum, deepVarVal) : nullptr;
}
else
{
copy->gtCallMethHnd = tree->gtCallMethHnd;
copy->gtInlineCandidateInfo = tree->gtInlineCandidateInfo;
}
copy->gtCallType = tree->gtCallType;
copy->gtReturnType = tree->gtReturnType;
if (tree->fgArgInfo)
{
// Create and initialize the fgArgInfo for our copy of the call tree
copy->fgArgInfo = new (this, CMK_Unknown) fgArgInfo(copy, tree);
}
else
{
copy->fgArgInfo = nullptr;
}
#if FEATURE_MULTIREG_RET
copy->gtReturnTypeDesc = tree->gtReturnTypeDesc;
#endif
#ifdef FEATURE_READYTORUN
copy->setEntryPoint(tree->gtEntryPoint);
#endif
#if defined(DEBUG) || defined(INLINE_DATA)
copy->gtInlineObservation = tree->gtInlineObservation;
copy->gtRawILOffset = tree->gtRawILOffset;
copy->gtInlineContext = tree->gtInlineContext;
#endif
copy->CopyOtherRegFlags(tree);
// We keep track of the number of no return calls, so if we've cloned
// one of these, update the tracking.
//
if (tree->IsNoReturn())
{
assert(copy->IsNoReturn());
setMethodHasNoReturnCalls();
}
return copy;
}
//------------------------------------------------------------------------
// gtCloneCandidateCall: clone a call that is an inline or guarded
// devirtualization candidate (~ any call that can have a GT_RET_EXPR)
//
// Notes:
// If the call really is a candidate, the caller must take additional steps
// after cloning to re-establish candidate info and the relationship between
// the candidate and any associated GT_RET_EXPR.
//
// Arguments:
// call - the call to clone
//
// Returns:
// Cloned copy of call and all subtrees.
GenTreeCall* Compiler::gtCloneCandidateCall(GenTreeCall* call)
{
assert(call->IsInlineCandidate() || call->IsGuardedDevirtualizationCandidate());
GenTreeCall* result = gtCloneExprCallHelper(call);
// There is some common post-processing in gtCloneExpr that we reproduce
// here, for the fields that make sense for candidate calls.
result->gtFlags |= call->gtFlags;
#if defined(DEBUG)
result->gtDebugFlags |= (call->gtDebugFlags & ~GTF_DEBUG_NODE_MASK);
#endif
result->CopyReg(call);
return result;
}
//------------------------------------------------------------------------
// gtUpdateSideEffects: Update the side effects of a tree and its ancestors
//
// Arguments:
// stmt - The tree's statement
// tree - Tree to update the side effects for
//
// Note: If the tree's order hasn't been established, the method updates the side effect
// flags on all of the statement's nodes.
void Compiler::gtUpdateSideEffects(Statement* stmt, GenTree* tree)
{
if (fgStmtListThreaded)
{
gtUpdateTreeAncestorsSideEffects(tree);
}
else
{
gtUpdateStmtSideEffects(stmt);
}
}
//------------------------------------------------------------------------
// gtUpdateTreeAncestorsSideEffects: Update the side effects of a tree and its ancestors
// when statement order has been established.
//
// Arguments:
// tree - Tree to update the side effects for
//
void Compiler::gtUpdateTreeAncestorsSideEffects(GenTree* tree)
{
assert(fgStmtListThreaded);
while (tree != nullptr)
{
gtUpdateNodeSideEffects(tree);
tree = tree->gtGetParent(nullptr);
}
}
//------------------------------------------------------------------------
// gtUpdateStmtSideEffects: Update the side effects for statement tree nodes.
//
// Arguments:
// stmt - The statement to update side effects on
//
void Compiler::gtUpdateStmtSideEffects(Statement* stmt)
{
fgWalkTree(stmt->GetRootNodePointer(), fgUpdateSideEffectsPre, fgUpdateSideEffectsPost);
}
//------------------------------------------------------------------------
// gtUpdateNodeOperSideEffects: Update the side effects based on the node operation.
//
// Arguments:
// tree - Tree to update the side effects on
//
// Notes:
// This method currently only updates GTF_EXCEPT, GTF_ASG, and GTF_CALL flags.
// The other side effect flags may remain unnecessarily (conservatively) set.
// The caller of this method is expected to update the flags based on the children's flags.
//
void Compiler::gtUpdateNodeOperSideEffects(GenTree* tree)
{
if (tree->OperMayThrow(this))
{
tree->gtFlags |= GTF_EXCEPT;
}
else
{
tree->gtFlags &= ~GTF_EXCEPT;
if (tree->OperIsIndirOrArrLength())
{
tree->SetIndirExceptionFlags(this);
}
}
if (tree->OperRequiresAsgFlag())
{
tree->gtFlags |= GTF_ASG;
}
else
{
tree->gtFlags &= ~GTF_ASG;
}
if (tree->OperRequiresCallFlag(this))
{
tree->gtFlags |= GTF_CALL;
}
else
{
tree->gtFlags &= ~GTF_CALL;
}
}
//------------------------------------------------------------------------
// gtUpdateNodeOperSideEffectsPost: Update the side effects based on the node operation,
// in the post-order visit of a tree walk. It is expected that the pre-order visit cleared
// the bits, so the post-order visit only sets them. This is important for binary nodes
// where one child already may have set the GTF_EXCEPT bit. Note that `SetIndirExceptionFlags`
// looks at its child, which is why we need to do this in a bottom-up walk.
//
// Arguments:
// tree - Tree to update the side effects on
//
// Notes:
// This method currently only updates GTF_ASG, GTF_CALL, and GTF_EXCEPT flags.
// The other side effect flags may remain unnecessarily (conservatively) set.
//
void Compiler::gtUpdateNodeOperSideEffectsPost(GenTree* tree)
{
if (tree->OperMayThrow(this))
{
tree->gtFlags |= GTF_EXCEPT;
}
if (tree->OperRequiresAsgFlag())
{
tree->gtFlags |= GTF_ASG;
}
if (tree->OperRequiresCallFlag(this))
{
tree->gtFlags |= GTF_CALL;
}
}
//------------------------------------------------------------------------
// gtUpdateNodeSideEffects: Update the side effects based on the node operation and
// children's side effects.
//
// Arguments:
// tree - Tree to update the side effects on
//
// Notes:
// This method currently only updates GTF_EXCEPT, GTF_ASG, and GTF_CALL flags.
// The other side effect flags may remain unnecessarily (conservatively) set.
//
void Compiler::gtUpdateNodeSideEffects(GenTree* tree)
{
gtUpdateNodeOperSideEffects(tree);
tree->VisitOperands([tree](GenTree* operand) -> GenTree::VisitResult {
tree->gtFlags |= (operand->gtFlags & GTF_ALL_EFFECT);
return GenTree::VisitResult::Continue;
});
}
//------------------------------------------------------------------------
// fgUpdateSideEffectsPre: Update the side effects based on the tree operation.
// The pre-visit walk clears GTF_ASG, GTF_CALL, and GTF_EXCEPT; the post-visit walk sets
// the bits as necessary.
//
// Arguments:
// pTree - Pointer to the tree to update the side effects
// fgWalkPre - Walk data
//
Compiler::fgWalkResult Compiler::fgUpdateSideEffectsPre(GenTree** pTree, fgWalkData* fgWalkPre)
{
GenTree* tree = *pTree;
tree->gtFlags &= ~(GTF_ASG | GTF_CALL | GTF_EXCEPT);
return WALK_CONTINUE;
}
//------------------------------------------------------------------------
// fgUpdateSideEffectsPost: Update the side effects of the node and parent based on the tree's flags.
//
// Arguments:
// pTree - Pointer to the tree
// fgWalkPost - Walk data
//
// Notes:
// The routine is used for updating the stale side effect flags for ancestor
// nodes starting from treeParent up to the top-level stmt expr.
//
Compiler::fgWalkResult Compiler::fgUpdateSideEffectsPost(GenTree** pTree, fgWalkData* fgWalkPost)
{
GenTree* tree = *pTree;
// Update the node's side effects first.
fgWalkPost->compiler->gtUpdateNodeOperSideEffectsPost(tree);
// If this node is an indir or array length, and it doesn't have the GTF_EXCEPT bit set, we
// set the GTF_IND_NONFAULTING bit. This needs to be done after all children, and this node, have
// been processed.
if (tree->OperIsIndirOrArrLength() && ((tree->gtFlags & GTF_EXCEPT) == 0))
{
tree->gtFlags |= GTF_IND_NONFAULTING;
}
// Then update the parent's side effects based on this node.
GenTree* parent = fgWalkPost->parent;
if (parent != nullptr)
{
parent->gtFlags |= (tree->gtFlags & GTF_ALL_EFFECT);
}
return WALK_CONTINUE;
}
//------------------------------------------------------------------------
// gtGetThisArg: Return this pointer node for the call.
//
// Arguments:
// call - the call node with a this argument.
//
// Return value:
// the this pointer node.
//
GenTree* Compiler::gtGetThisArg(GenTreeCall* call)
{
assert(call->gtCallThisArg != nullptr);
GenTree* thisArg = call->gtCallThisArg->GetNode();
if (!thisArg->OperIs(GT_ASG))
{
if ((thisArg->gtFlags & GTF_LATE_ARG) == 0)
{
return thisArg;
}
}
assert(call->gtCallLateArgs != nullptr);
unsigned argNum = 0;
fgArgTabEntry* thisArgTabEntry = gtArgEntryByArgNum(call, argNum);
GenTree* result = thisArgTabEntry->GetNode();
// Assert if we used DEBUG_DESTROY_NODE.
assert(result->gtOper != GT_COUNT);
return result;
}
bool GenTree::gtSetFlags() const
{
//
// When FEATURE_SET_FLAGS (TARGET_ARM) is active the method returns true
// when gtFlags has the GTF_SET_FLAGS flag set;
// otherwise the architecture has instructions that typically set
// the flags and this method will return true.
//
// Exceptions: GT_IND (load/store) is not allowed to set the flags
// and on XARCH the GT_MUL/GT_DIV and all overflow instructions
// do not set the condition flags
//
// Precondition: we have a GTK_SMPOP
//
if (!varTypeIsIntegralOrI(TypeGet()) && (TypeGet() != TYP_VOID))
{
return false;
}
if (((gtFlags & GTF_SET_FLAGS) != 0) && (gtOper != GT_IND))
{
// GTF_SET_FLAGS is not valid on GT_IND and is overlaid with GTF_NONFAULTING_IND
return true;
}
else
{
return false;
}
}
bool GenTree::gtRequestSetFlags()
{
bool result = false;
#if FEATURE_SET_FLAGS
// This method is a no-op unless FEATURE_SET_FLAGS is defined.
// In order to set GTF_SET_FLAGS
// we must have a GTK_SMPOP
// and an integer or machine-size type (not floating point or TYP_LONG on 32-bit)
//
if (!OperIsSimple())
return false;
if (!varTypeIsIntegralOrI(TypeGet()))
return false;
switch (gtOper)
{
case GT_IND:
case GT_ARR_LENGTH:
// These will turn into simple load from memory instructions
// and we can't force the setting of the flags on load from memory
break;
case GT_MUL:
case GT_DIV:
// These instructions don't set the flags (on x86/x64)
//
break;
default:
// Otherwise we can set the flags for this gtOper
// and codegen must set the condition flags.
//
gtFlags |= GTF_SET_FLAGS;
result = true;
break;
}
#endif // FEATURE_SET_FLAGS
// Codegen for this tree must set the condition flags if
// this method returns true.
//
return result;
}
GenTreeUseEdgeIterator::GenTreeUseEdgeIterator()
: m_advance(nullptr), m_node(nullptr), m_edge(nullptr), m_statePtr(nullptr), m_state(-1)
{
}
GenTreeUseEdgeIterator::GenTreeUseEdgeIterator(GenTree* node)
: m_advance(nullptr), m_node(node), m_edge(nullptr), m_statePtr(nullptr), m_state(0)
{
assert(m_node != nullptr);
// NOTE: the switch statement below must be updated when introducing new nodes.
switch (m_node->OperGet())
{
// Leaf nodes
case GT_LCL_VAR:
case GT_LCL_FLD:
case GT_LCL_VAR_ADDR:
case GT_LCL_FLD_ADDR:
case GT_CATCH_ARG:
case GT_LABEL:
case GT_FTN_ADDR:
case GT_RET_EXPR:
case GT_CNS_INT:
case GT_CNS_LNG:
case GT_CNS_DBL:
case GT_CNS_STR:
case GT_MEMORYBARRIER:
case GT_JMP:
case GT_JCC:
case GT_SETCC:
case GT_NO_OP:
case GT_START_NONGC:
case GT_START_PREEMPTGC:
case GT_PROF_HOOK:
#if !defined(FEATURE_EH_FUNCLETS)
case GT_END_LFIN:
#endif // !FEATURE_EH_FUNCLETS
case GT_PHI_ARG:
case GT_JMPTABLE:
case GT_CLS_VAR:
case GT_CLS_VAR_ADDR:
case GT_ARGPLACE:
case GT_PHYSREG:
case GT_EMITNOP:
case GT_PINVOKE_PROLOG:
case GT_PINVOKE_EPILOG:
case GT_IL_OFFSET:
m_state = -1;
return;
// Standard unary operators
case GT_STORE_LCL_VAR:
case GT_STORE_LCL_FLD:
case GT_NOT:
case GT_NEG:
case GT_COPY:
case GT_RELOAD:
case GT_ARR_LENGTH:
case GT_CAST:
case GT_BITCAST:
case GT_CKFINITE:
case GT_LCLHEAP:
case GT_ADDR:
case GT_IND:
case GT_OBJ:
case GT_BLK:
case GT_BOX:
case GT_ALLOCOBJ:
case GT_RUNTIMELOOKUP:
case GT_INIT_VAL:
case GT_JTRUE:
case GT_SWITCH:
case GT_NULLCHECK:
case GT_PUTARG_REG:
case GT_PUTARG_STK:
case GT_PUTARG_TYPE:
case GT_BSWAP:
case GT_BSWAP16:
case GT_KEEPALIVE:
case GT_INC_SATURATE:
#if FEATURE_ARG_SPLIT
case GT_PUTARG_SPLIT:
#endif // FEATURE_ARG_SPLIT
case GT_RETURNTRAP:
m_edge = &m_node->AsUnOp()->gtOp1;
assert(*m_edge != nullptr);
m_advance = &GenTreeUseEdgeIterator::Terminate;
return;
// Unary operators with an optional operand
case GT_NOP:
case GT_FIELD:
case GT_RETURN:
case GT_RETFILT:
if (m_node->AsUnOp()->gtOp1 == nullptr)
{
assert(m_node->NullOp1Legal());
m_state = -1;
}
else
{
m_edge = &m_node->AsUnOp()->gtOp1;
m_advance = &GenTreeUseEdgeIterator::Terminate;
}
return;
// Variadic nodes
#ifdef FEATURE_SIMD
case GT_SIMD:
#endif
#ifdef FEATURE_HW_INTRINSICS
case GT_HWINTRINSIC:
#endif
#if defined(FEATURE_SIMD) || defined(FEATURE_HW_INTRINSICS)
SetEntryStateForMultiOp();
return;
#endif // defined(FEATURE_SIMD) || defined(FEATURE_HW_INTRINSICS)
// LEA, which may have no first operand
case GT_LEA:
if (m_node->AsAddrMode()->gtOp1 == nullptr)
{
m_edge = &m_node->AsAddrMode()->gtOp2;
m_advance = &GenTreeUseEdgeIterator::Terminate;
}
else
{
SetEntryStateForBinOp();
}
return;
// Special nodes
case GT_FIELD_LIST:
m_statePtr = m_node->AsFieldList()->Uses().GetHead();
m_advance = &GenTreeUseEdgeIterator::AdvanceFieldList;
AdvanceFieldList();
return;
case GT_PHI:
m_statePtr = m_node->AsPhi()->gtUses;
m_advance = &GenTreeUseEdgeIterator::AdvancePhi;
AdvancePhi();
return;
case GT_CMPXCHG:
m_edge = &m_node->AsCmpXchg()->gtOpLocation;
assert(*m_edge != nullptr);
m_advance = &GenTreeUseEdgeIterator::AdvanceCmpXchg;
return;
case GT_ARR_ELEM:
m_edge = &m_node->AsArrElem()->gtArrObj;
assert(*m_edge != nullptr);
m_advance = &GenTreeUseEdgeIterator::AdvanceArrElem;
return;
case GT_ARR_OFFSET:
m_edge = &m_node->AsArrOffs()->gtOffset;
assert(*m_edge != nullptr);
m_advance = &GenTreeUseEdgeIterator::AdvanceArrOffset;
return;
case GT_STORE_DYN_BLK:
m_edge = &m_node->AsStoreDynBlk()->Addr();
assert(*m_edge != nullptr);
m_advance = &GenTreeUseEdgeIterator::AdvanceStoreDynBlk;
return;
case GT_CALL:
AdvanceCall<CALL_INSTANCE>();
return;
// Binary nodes
default:
assert(m_node->OperIsBinary());
SetEntryStateForBinOp();
return;
}
}
//------------------------------------------------------------------------
// GenTreeUseEdgeIterator::AdvanceCmpXchg: produces the next operand of a CmpXchg node and advances the state.
//
void GenTreeUseEdgeIterator::AdvanceCmpXchg()
{
switch (m_state)
{
case 0:
m_edge = &m_node->AsCmpXchg()->gtOpValue;
m_state = 1;
break;
case 1:
m_edge = &m_node->AsCmpXchg()->gtOpComparand;
m_advance = &GenTreeUseEdgeIterator::Terminate;
break;
default:
unreached();
}
assert(*m_edge != nullptr);
}
//------------------------------------------------------------------------
// GenTreeUseEdgeIterator::AdvanceArrElem: produces the next operand of an ArrElem node and advances the state.
//
// Because these nodes are variadic, this function uses `m_state` to index into the list of array indices.
//
void GenTreeUseEdgeIterator::AdvanceArrElem()
{
if (m_state < m_node->AsArrElem()->gtArrRank)
{
m_edge = &m_node->AsArrElem()->gtArrInds[m_state];
assert(*m_edge != nullptr);
m_state++;
}
else
{
m_state = -1;
}
}
//------------------------------------------------------------------------
// GenTreeUseEdgeIterator::AdvanceArrOffset: produces the next operand of an ArrOffset node and advances the state.
//
void GenTreeUseEdgeIterator::AdvanceArrOffset()
{
switch (m_state)
{
case 0:
m_edge = &m_node->AsArrOffs()->gtIndex;
m_state = 1;
break;
case 1:
m_edge = &m_node->AsArrOffs()->gtArrObj;
m_advance = &GenTreeUseEdgeIterator::Terminate;
break;
default:
unreached();
}
assert(*m_edge != nullptr);
}
//------------------------------------------------------------------------
// GenTreeUseEdgeIterator::AdvanceStoreDynBlk: produces the next operand of a StoreDynBlk node and advances the state.
//
void GenTreeUseEdgeIterator::AdvanceStoreDynBlk()
{
GenTreeStoreDynBlk* const dynBlock = m_node->AsStoreDynBlk();
switch (m_state)
{
case 0:
m_edge = &dynBlock->Data();
m_state = 1;
break;
case 1:
m_edge = &dynBlock->gtDynamicSize;
m_advance = &GenTreeUseEdgeIterator::Terminate;
break;
default:
unreached();
}
assert(*m_edge != nullptr);
}
//------------------------------------------------------------------------
// GenTreeUseEdgeIterator::AdvanceFieldList: produces the next operand of a FieldList node and advances the state.
//
void GenTreeUseEdgeIterator::AdvanceFieldList()
{
assert(m_state == 0);
if (m_statePtr == nullptr)
{
m_state = -1;
}
else
{
GenTreeFieldList::Use* currentUse = static_cast<GenTreeFieldList::Use*>(m_statePtr);
m_edge = ¤tUse->NodeRef();
m_statePtr = currentUse->GetNext();
}
}
//------------------------------------------------------------------------
// GenTreeUseEdgeIterator::AdvancePhi: produces the next operand of a Phi node and advances the state.
//
void GenTreeUseEdgeIterator::AdvancePhi()
{
assert(m_state == 0);
if (m_statePtr == nullptr)
{
m_state = -1;
}
else
{
GenTreePhi::Use* currentUse = static_cast<GenTreePhi::Use*>(m_statePtr);
m_edge = ¤tUse->NodeRef();
m_statePtr = currentUse->GetNext();
}
}
//------------------------------------------------------------------------
// GenTreeUseEdgeIterator::AdvanceBinOp: produces the next operand of a binary node and advances the state.
//
// This function must be instantiated s.t. `ReverseOperands` is `true` iff the node is marked with the
// `GTF_REVERSE_OPS` flag.
//
template <bool ReverseOperands>
void GenTreeUseEdgeIterator::AdvanceBinOp()
{
assert(ReverseOperands == ((m_node->gtFlags & GTF_REVERSE_OPS) != 0));
m_edge = !ReverseOperands ? &m_node->AsOp()->gtOp2 : &m_node->AsOp()->gtOp1;
assert(*m_edge != nullptr);
m_advance = &GenTreeUseEdgeIterator::Terminate;
}
//------------------------------------------------------------------------
// GenTreeUseEdgeIterator::SetEntryStateForBinOp: produces the first operand of a binary node and chooses
// the appropriate advance function.
//
void GenTreeUseEdgeIterator::SetEntryStateForBinOp()
{
assert(m_node != nullptr);
assert(m_node->OperIsBinary());
GenTreeOp* const node = m_node->AsOp();
if (node->gtOp2 == nullptr)
{
assert(node->gtOp1 != nullptr);
assert(node->NullOp2Legal());
m_edge = &node->gtOp1;
m_advance = &GenTreeUseEdgeIterator::Terminate;
}
else if ((node->gtFlags & GTF_REVERSE_OPS) != 0)
{
m_edge = &m_node->AsOp()->gtOp2;
m_advance = &GenTreeUseEdgeIterator::AdvanceBinOp<true>;
}
else
{
m_edge = &m_node->AsOp()->gtOp1;
m_advance = &GenTreeUseEdgeIterator::AdvanceBinOp<false>;
}
}
#if defined(FEATURE_SIMD) || defined(FEATURE_HW_INTRINSICS)
//------------------------------------------------------------------------
// GenTreeUseEdgeIterator::AdvanceMultiOp: produces the next operand of a multi-op node and advances the state.
//
// Takes advantage of the fact that GenTreeMultiOp stores the operands in a contiguous array, simply
// incrementing the "m_edge" pointer, unless the end, stored in "m_statePtr", has been reached.
//
void GenTreeUseEdgeIterator::AdvanceMultiOp()
{
assert(m_node != nullptr);
assert(m_node->OperIs(GT_SIMD, GT_HWINTRINSIC));
m_edge++;
if (m_edge == m_statePtr)
{
Terminate();
}
}
//------------------------------------------------------------------------
// GenTreeUseEdgeIterator::AdvanceReversedMultiOp: produces the next operand of a multi-op node
// marked with GTF_REVERSE_OPS and advances the state.
//
// Takes advantage of the fact that GenTreeMultiOp stores the operands in a contiguous array, simply
// decrementing the "m_edge" pointer, unless the beginning, stored in "m_statePtr", has been reached.
//
void GenTreeUseEdgeIterator::AdvanceReversedMultiOp()
{
assert(m_node != nullptr);
assert(m_node->OperIs(GT_SIMD, GT_HWINTRINSIC));
assert((m_node->AsMultiOp()->GetOperandCount() == 2) && m_node->IsReverseOp());
m_edge--;
if (m_edge == m_statePtr)
{
Terminate();
}
}
//------------------------------------------------------------------------
// GenTreeUseEdgeIterator::SetEntryStateForMultiOp: produces the first operand of a multi-op node and sets the
// required advance function.
//
void GenTreeUseEdgeIterator::SetEntryStateForMultiOp()
{
size_t operandCount = m_node->AsMultiOp()->GetOperandCount();
if (operandCount == 0)
{
Terminate();
}
else
{
if (m_node->IsReverseOp())
{
assert(operandCount == 2);
m_edge = m_node->AsMultiOp()->GetOperandArray() + 1;
m_statePtr = m_node->AsMultiOp()->GetOperandArray() - 1;
m_advance = &GenTreeUseEdgeIterator::AdvanceReversedMultiOp;
}
else
{
m_edge = m_node->AsMultiOp()->GetOperandArray();
m_statePtr = m_node->AsMultiOp()->GetOperandArray(operandCount);
m_advance = &GenTreeUseEdgeIterator::AdvanceMultiOp;
}
}
}
#endif
//------------------------------------------------------------------------
// GenTreeUseEdgeIterator::AdvanceCall: produces the next operand of a call node and advances the state.
//
// This function is a bit tricky: in order to avoid doing unnecessary work, it is instantiated with the
// state number the iterator will be in when it is called. For example, `AdvanceCall<CALL_INSTANCE>`
// is the instantiation used when the iterator is at the `CALL_INSTANCE` state (i.e. the entry state).
// This sort of templating allows each state to avoid processing earlier states without unnecessary
// duplication of code.
//
// Note that this method expands the argument lists (`gtCallArgs` and `gtCallLateArgs`) into their
// component operands.
//
template <int state>
void GenTreeUseEdgeIterator::AdvanceCall()
{
GenTreeCall* const call = m_node->AsCall();
switch (state)
{
case CALL_INSTANCE:
m_statePtr = call->gtCallArgs;
m_advance = &GenTreeUseEdgeIterator::AdvanceCall<CALL_ARGS>;
if (call->gtCallThisArg != nullptr)
{
m_edge = &call->gtCallThisArg->NodeRef();
return;
}
FALLTHROUGH;
case CALL_ARGS:
if (m_statePtr != nullptr)
{
GenTreeCall::Use* use = static_cast<GenTreeCall::Use*>(m_statePtr);
m_edge = &use->NodeRef();
m_statePtr = use->GetNext();
return;
}
m_statePtr = call->gtCallLateArgs;
m_advance = &GenTreeUseEdgeIterator::AdvanceCall<CALL_LATE_ARGS>;
FALLTHROUGH;
case CALL_LATE_ARGS:
if (m_statePtr != nullptr)
{
GenTreeCall::Use* use = static_cast<GenTreeCall::Use*>(m_statePtr);
m_edge = &use->NodeRef();
m_statePtr = use->GetNext();
return;
}
m_advance = &GenTreeUseEdgeIterator::AdvanceCall<CALL_CONTROL_EXPR>;
FALLTHROUGH;
case CALL_CONTROL_EXPR:
if (call->gtControlExpr != nullptr)
{
if (call->gtCallType == CT_INDIRECT)
{
m_advance = &GenTreeUseEdgeIterator::AdvanceCall<CALL_COOKIE>;
}
else
{
m_advance = &GenTreeUseEdgeIterator::Terminate;
}
m_edge = &call->gtControlExpr;
return;
}
else if (call->gtCallType != CT_INDIRECT)
{
m_state = -1;
return;
}
FALLTHROUGH;
case CALL_COOKIE:
assert(call->gtCallType == CT_INDIRECT);
m_advance = &GenTreeUseEdgeIterator::AdvanceCall<CALL_ADDRESS>;
if (call->gtCallCookie != nullptr)
{
m_edge = &call->gtCallCookie;
return;
}
FALLTHROUGH;
case CALL_ADDRESS:
assert(call->gtCallType == CT_INDIRECT);
m_advance = &GenTreeUseEdgeIterator::Terminate;
if (call->gtCallAddr != nullptr)
{
m_edge = &call->gtCallAddr;
}
return;
default:
unreached();
}
}
//------------------------------------------------------------------------
// GenTreeUseEdgeIterator::Terminate: advances the iterator to the terminal state.
//
void GenTreeUseEdgeIterator::Terminate()
{
m_state = -1;
}
//------------------------------------------------------------------------
// GenTreeUseEdgeIterator::operator++: advances the iterator to the next operand.
//
GenTreeUseEdgeIterator& GenTreeUseEdgeIterator::operator++()
{
// If we've reached the terminal state, do nothing.
if (m_state != -1)
{
(this->*m_advance)();
}
return *this;
}
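// The following accessors expose the use-edge and operand iterators as begin/end pairs
// suitable for range-based iteration.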
GenTreeUseEdgeIterator GenTree::UseEdgesBegin()
{
return GenTreeUseEdgeIterator(this);
}
GenTreeUseEdgeIterator GenTree::UseEdgesEnd()
{
return GenTreeUseEdgeIterator();
}
IteratorPair<GenTreeUseEdgeIterator> GenTree::UseEdges()
{
return MakeIteratorPair(UseEdgesBegin(), UseEdgesEnd());
}
GenTreeOperandIterator GenTree::OperandsBegin()
{
return GenTreeOperandIterator(this);
}
GenTreeOperandIterator GenTree::OperandsEnd()
{
return GenTreeOperandIterator();
}
IteratorPair<GenTreeOperandIterator> GenTree::Operands()
{
return MakeIteratorPair(OperandsBegin(), OperandsEnd());
}
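//------------------------------------------------------------------------
// Precedes: Check whether this node comes before `other` in the linked (gtNext) execution order.
//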
bool GenTree::Precedes(GenTree* other)
{
assert(other != nullptr);
for (GenTree* node = gtNext; node != nullptr; node = node->gtNext)
{
if (node == other)
{
return true;
}
}
return false;
}
//------------------------------------------------------------------------------
// SetIndirExceptionFlags: Set GTF_EXCEPT and GTF_IND_NONFAULTING flags as appropriate
// on an indirection or an array length node.
//
// Arguments:
// comp - compiler instance
//
void GenTree::SetIndirExceptionFlags(Compiler* comp)
{
assert(OperIsIndirOrArrLength());
if (OperMayThrow(comp))
{
gtFlags |= GTF_EXCEPT;
return;
}
GenTree* addr = nullptr;
if (OperIsIndir())
{
addr = AsIndir()->Addr();
}
else
{
assert(gtOper == GT_ARR_LENGTH);
addr = AsArrLen()->ArrRef();
}
if ((addr->gtFlags & GTF_EXCEPT) != 0)
{
gtFlags |= GTF_EXCEPT;
}
else
{
gtFlags &= ~GTF_EXCEPT;
gtFlags |= GTF_IND_NONFAULTING;
}
}
#ifdef DEBUG
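// gtDispFlags: Print a one-character summary for each of the common node flags and
// return the number of characters displayed.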
/* static */ int GenTree::gtDispFlags(GenTreeFlags flags, GenTreeDebugFlags debugFlags)
{
int charsDisplayed = 11; // 11 is the "baseline" number of flag characters displayed
printf("%c", (flags & GTF_ASG) ? 'A' : (IsContained(flags) ? 'c' : '-'));
printf("%c", (flags & GTF_CALL) ? 'C' : '-');
printf("%c", (flags & GTF_EXCEPT) ? 'X' : '-');
printf("%c", (flags & GTF_GLOB_REF) ? 'G' : '-');
printf("%c", (debugFlags & GTF_DEBUG_NODE_MORPHED) ? '+' : // First print '+' if GTF_DEBUG_NODE_MORPHED is set
(flags & GTF_ORDER_SIDEEFF) ? 'O' : '-'); // otherwise print 'O' or '-'
printf("%c", (flags & GTF_COLON_COND) ? '?' : '-');
printf("%c", (flags & GTF_DONT_CSE) ? 'N' : // N is for No cse
(flags & GTF_MAKE_CSE) ? 'H' : '-'); // H is for Hoist this expr
printf("%c", (flags & GTF_REVERSE_OPS) ? 'R' : '-');
printf("%c", (flags & GTF_UNSIGNED) ? 'U' : (flags & GTF_BOOLEAN) ? 'B' : '-');
#if FEATURE_SET_FLAGS
printf("%c", (flags & GTF_SET_FLAGS) ? 'S' : '-');
++charsDisplayed;
#endif
printf("%c", (flags & GTF_LATE_ARG) ? 'L' : '-');
printf("%c", (flags & GTF_SPILLED) ? 'z' : (flags & GTF_SPILL) ? 'Z' : '-');
return charsDisplayed;
}
#ifdef TARGET_X86
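// GetCallConvName: Map an unmanaged calling convention to a printable name (x86 only).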
inline const char* GetCallConvName(CorInfoCallConvExtension callConv)
{
switch (callConv)
{
case CorInfoCallConvExtension::Managed:
return "Managed";
case CorInfoCallConvExtension::C:
return "C";
case CorInfoCallConvExtension::Stdcall:
return "Stdcall";
case CorInfoCallConvExtension::Thiscall:
return "Thiscall";
case CorInfoCallConvExtension::Fastcall:
return "Fastcall";
case CorInfoCallConvExtension::CMemberFunction:
return "CMemberFunction";
case CorInfoCallConvExtension::StdcallMemberFunction:
return "StdcallMemberFunction";
case CorInfoCallConvExtension::FastcallMemberFunction:
return "FastcallMemberFunction";
default:
return "UnknownCallConv";
}
}
#endif // TARGET_X86
/*****************************************************************************/
void Compiler::gtDispNodeName(GenTree* tree)
{
/* print the node name */
const char* name;
assert(tree);
if (tree->gtOper < GT_COUNT)
{
name = GenTree::OpName(tree->OperGet());
}
else
{
name = "<ERROR>";
}
char buf[32];
char* bufp = &buf[0];
if ((tree->gtOper == GT_CNS_INT) && tree->IsIconHandle())
{
sprintf_s(bufp, sizeof(buf), " %s(h)%c", name, 0);
}
else if (tree->gtOper == GT_PUTARG_STK)
{
sprintf_s(bufp, sizeof(buf), " %s [+0x%02x]%c", name, tree->AsPutArgStk()->getArgOffset(), 0);
}
else if (tree->gtOper == GT_CALL)
{
const char* callType = "CALL";
const char* gtfType = "";
const char* ctType = "";
char gtfTypeBuf[100];
if (tree->AsCall()->gtCallType == CT_USER_FUNC)
{
if (tree->AsCall()->IsVirtual())
{
callType = "CALLV";
}
}
else if (tree->AsCall()->gtCallType == CT_HELPER)
{
ctType = " help";
}
else if (tree->AsCall()->gtCallType == CT_INDIRECT)
{
ctType = " ind";
}
else
{
assert(!"Unknown gtCallType");
}
if (tree->gtFlags & GTF_CALL_NULLCHECK)
{
gtfType = " nullcheck";
}
if (tree->AsCall()->IsVirtualVtable())
{
gtfType = " vt-ind";
}
else if (tree->AsCall()->IsVirtualStub())
{
gtfType = " stub";
}
#ifdef FEATURE_READYTORUN
else if (tree->AsCall()->IsR2RRelativeIndir())
{
gtfType = " r2r_ind";
}
#endif // FEATURE_READYTORUN
else if (tree->gtFlags & GTF_CALL_UNMANAGED)
{
char* gtfTypeBufWalk = gtfTypeBuf;
gtfTypeBufWalk += SimpleSprintf_s(gtfTypeBufWalk, gtfTypeBuf, sizeof(gtfTypeBuf), " unman");
if (tree->gtFlags & GTF_CALL_POP_ARGS)
{
gtfTypeBufWalk += SimpleSprintf_s(gtfTypeBufWalk, gtfTypeBuf, sizeof(gtfTypeBuf), " popargs");
}
if (tree->AsCall()->gtCallMoreFlags & GTF_CALL_M_UNMGD_THISCALL)
{
gtfTypeBufWalk += SimpleSprintf_s(gtfTypeBufWalk, gtfTypeBuf, sizeof(gtfTypeBuf), " thiscall");
}
#ifdef TARGET_X86
gtfTypeBufWalk += SimpleSprintf_s(gtfTypeBufWalk, gtfTypeBuf, sizeof(gtfTypeBuf), " %s",
GetCallConvName(tree->AsCall()->GetUnmanagedCallConv()));
#endif // TARGET_X86
gtfType = gtfTypeBuf;
}
sprintf_s(bufp, sizeof(buf), " %s%s%s%c", callType, ctType, gtfType, 0);
}
else if (tree->gtOper == GT_ARR_ELEM)
{
bufp += SimpleSprintf_s(bufp, buf, sizeof(buf), " %s[", name);
for (unsigned rank = tree->AsArrElem()->gtArrRank - 1; rank; rank--)
{
bufp += SimpleSprintf_s(bufp, buf, sizeof(buf), ",");
}
SimpleSprintf_s(bufp, buf, sizeof(buf), "]");
}
else if (tree->gtOper == GT_ARR_OFFSET || tree->gtOper == GT_ARR_INDEX)
{
bufp += SimpleSprintf_s(bufp, buf, sizeof(buf), " %s[", name);
unsigned char currDim;
unsigned char rank;
if (tree->gtOper == GT_ARR_OFFSET)
{
currDim = tree->AsArrOffs()->gtCurrDim;
rank = tree->AsArrOffs()->gtArrRank;
}
else
{
currDim = tree->AsArrIndex()->gtCurrDim;
rank = tree->AsArrIndex()->gtArrRank;
}
for (unsigned char dim = 0; dim < rank; dim++)
{
// Use a de facto standard i,j,k for the dimensions.
// Note that we only support up to rank 3 arrays with these nodes, so we won't run out of characters.
char dimChar = '*';
if (dim == currDim)
{
dimChar = 'i' + dim;
}
else if (dim > currDim)
{
dimChar = ' ';
}
bufp += SimpleSprintf_s(bufp, buf, sizeof(buf), "%c", dimChar);
if (dim != rank - 1)
{
bufp += SimpleSprintf_s(bufp, buf, sizeof(buf), ",");
}
}
SimpleSprintf_s(bufp, buf, sizeof(buf), "]");
}
else if (tree->gtOper == GT_LEA)
{
GenTreeAddrMode* lea = tree->AsAddrMode();
bufp += SimpleSprintf_s(bufp, buf, sizeof(buf), " %s(", name);
if (lea->Base() != nullptr)
{
bufp += SimpleSprintf_s(bufp, buf, sizeof(buf), "b+");
}
if (lea->Index() != nullptr)
{
bufp += SimpleSprintf_s(bufp, buf, sizeof(buf), "(i*%d)+", lea->gtScale);
}
bufp += SimpleSprintf_s(bufp, buf, sizeof(buf), "%d)", lea->Offset());
}
else if (tree->gtOper == GT_BOUNDS_CHECK)
{
switch (tree->AsBoundsChk()->gtThrowKind)
{
case SCK_RNGCHK_FAIL:
{
bufp += SimpleSprintf_s(bufp, buf, sizeof(buf), " %s_Rng", name);
if (tree->AsBoundsChk()->gtIndRngFailBB != nullptr)
{
bufp += SimpleSprintf_s(bufp, buf, sizeof(buf), " -> " FMT_BB,
tree->AsBoundsChk()->gtIndRngFailBB->bbNum);
}
break;
}
case SCK_ARG_EXCPN:
sprintf_s(bufp, sizeof(buf), " %s_Arg", name);
break;
case SCK_ARG_RNG_EXCPN:
sprintf_s(bufp, sizeof(buf), " %s_ArgRng", name);
break;
default:
unreached();
}
}
else if (tree->gtOverflowEx())
{
sprintf_s(bufp, sizeof(buf), " %s_ovfl%c", name, 0);
}
else
{
sprintf_s(bufp, sizeof(buf), " %s%c", name, 0);
}
if (strlen(buf) < 10)
{
printf(" %-10s", buf);
}
else
{
printf(" %s", buf);
}
}
//------------------------------------------------------------------------
// gtDispZeroFieldSeq: If this node has a zero fieldSeq annotation
// then print this Field Sequence
//
void Compiler::gtDispZeroFieldSeq(GenTree* tree)
{
NodeToFieldSeqMap* map = GetZeroOffsetFieldMap();
// The most common case is having no entries in this map
if (map->GetCount() > 0)
{
FieldSeqNode* fldSeq = nullptr;
if (map->Lookup(tree, &fldSeq))
{
printf(" Zero");
gtDispAnyFieldSeq(fldSeq);
}
}
}
//------------------------------------------------------------------------
// gtDispVN: Utility function that prints a tree's ValueNumber: gtVNPair
//
void Compiler::gtDispVN(GenTree* tree)
{
if (tree->gtVNPair.GetLiberal() != ValueNumStore::NoVN)
{
assert(tree->gtVNPair.GetConservative() != ValueNumStore::NoVN);
printf(" ");
vnpPrint(tree->gtVNPair, 0);
}
}
//------------------------------------------------------------------------
// gtDispCommonEndLine
// Utility function that prints the following node information
// 1. The associated zero field sequence (if any)
// 2. The register assigned to this node (if any)
// 3. The value number assigned (if any)
// 4. A newline character
//
void Compiler::gtDispCommonEndLine(GenTree* tree)
{
gtDispZeroFieldSeq(tree);
gtDispRegVal(tree);
gtDispVN(tree);
printf("\n");
}
//------------------------------------------------------------------------
// gtDispNode: Print a tree to jitstdout.
//
// Arguments:
// tree - the tree to be printed
// indentStack - the specification for the current level of indentation & arcs
// msg - a contextual method (i.e. from the parent) to print
//
// Return Value:
// None.
//
// Notes:
// 'indentStack' may be null, in which case no indentation or arcs are printed
// 'msg' may be null
void Compiler::gtDispNode(GenTree* tree, IndentStack* indentStack, _In_ _In_opt_z_ const char* msg, bool isLIR)
{
bool printFlags = true; // always true..
int msgLength = 25;
GenTree* prev;
if (tree->gtSeqNum)
{
printf("N%03u ", tree->gtSeqNum);
if (tree->gtCostsInitialized)
{
printf("(%3u,%3u) ", tree->GetCostEx(), tree->GetCostSz());
}
else
{
printf("(???"
",???"
") "); // This probably indicates a bug: the node has a sequence number, but not costs.
}
}
else
{
prev = tree;
bool hasSeqNum = true;
unsigned dotNum = 0;
do
{
dotNum++;
prev = prev->gtPrev;
if ((prev == nullptr) || (prev == tree))
{
hasSeqNum = false;
break;
}
assert(prev);
} while (prev->gtSeqNum == 0);
// If we have an indent stack, don't add additional characters,
// as it will mess up the alignment.
bool displayDotNum = hasSeqNum && (indentStack == nullptr);
if (displayDotNum)
{
printf("N%03u.%02u ", prev->gtSeqNum, dotNum);
}
else
{
printf(" ");
}
if (tree->gtCostsInitialized)
{
printf("(%3u,%3u) ", tree->GetCostEx(), tree->GetCostSz());
}
else
{
if (displayDotNum)
{
// Do better alignment in this case
printf(" ");
}
else
{
printf(" ");
}
}
}
if (optValnumCSE_phase)
{
if (IS_CSE_INDEX(tree->gtCSEnum))
{
printf(FMT_CSE " (%s)", GET_CSE_INDEX(tree->gtCSEnum), (IS_CSE_USE(tree->gtCSEnum) ? "use" : "def"));
}
else
{
printf(" ");
}
}
/* Print the node ID */
printTreeID(tree);
printf(" ");
if (tree->gtOper >= GT_COUNT)
{
printf(" **** ILLEGAL NODE ****");
return;
}
if (printFlags)
{
/* First print the flags associated with the node */
switch (tree->gtOper)
{
case GT_LEA:
case GT_BLK:
case GT_OBJ:
case GT_STORE_BLK:
case GT_STORE_OBJ:
case GT_STORE_DYN_BLK:
case GT_IND:
// We prefer printing V or U
if ((tree->gtFlags & (GTF_IND_VOLATILE | GTF_IND_UNALIGNED)) == 0)
{
if (tree->gtFlags & GTF_IND_TGTANYWHERE)
{
printf("*");
--msgLength;
break;
}
if (tree->gtFlags & GTF_IND_TGT_NOT_HEAP)
{
printf("s");
--msgLength;
break;
}
if (tree->gtFlags & GTF_IND_INVARIANT)
{
printf("#");
--msgLength;
break;
}
if (tree->gtFlags & GTF_IND_ARR_INDEX)
{
printf("a");
--msgLength;
break;
}
if (tree->gtFlags & GTF_IND_NONFAULTING)
{
printf("n"); // print a n for non-faulting
--msgLength;
break;
}
if (tree->gtFlags & GTF_IND_ASG_LHS)
{
printf("D"); // print a D for definition
--msgLength;
break;
}
if (tree->gtFlags & GTF_IND_NONNULL)
{
printf("@");
--msgLength;
break;
}
}
FALLTHROUGH;
case GT_INDEX:
case GT_INDEX_ADDR:
case GT_FIELD:
case GT_CLS_VAR:
if (tree->gtFlags & GTF_IND_VOLATILE)
{
printf("V");
--msgLength;
break;
}
if (tree->gtFlags & GTF_IND_UNALIGNED)
{
printf("U");
--msgLength;
break;
}
goto DASH;
case GT_ASG:
if (tree->OperIsInitBlkOp())
{
printf("I");
--msgLength;
break;
}
goto DASH;
case GT_CALL:
if (tree->AsCall()->IsInlineCandidate())
{
if (tree->AsCall()->IsGuardedDevirtualizationCandidate())
{
printf("&");
}
else
{
printf("I");
}
--msgLength;
break;
}
else if (tree->AsCall()->IsGuardedDevirtualizationCandidate())
{
printf("G");
--msgLength;
break;
}
if (tree->AsCall()->gtCallMoreFlags & GTF_CALL_M_RETBUFFARG)
{
printf("S");
--msgLength;
break;
}
if (tree->gtFlags & GTF_CALL_HOISTABLE)
{
printf("H");
--msgLength;
break;
}
goto DASH;
case GT_MUL:
#if !defined(TARGET_64BIT)
case GT_MUL_LONG:
#endif
if (tree->gtFlags & GTF_MUL_64RSLT)
{
printf("L");
--msgLength;
break;
}
goto DASH;
case GT_DIV:
case GT_MOD:
case GT_UDIV:
case GT_UMOD:
if (tree->gtFlags & GTF_DIV_BY_CNS_OPT)
{
printf("M"); // We will use a Multiply by reciprical
--msgLength;
break;
}
goto DASH;
case GT_LCL_FLD:
case GT_LCL_VAR:
case GT_LCL_VAR_ADDR:
case GT_LCL_FLD_ADDR:
case GT_STORE_LCL_FLD:
case GT_STORE_LCL_VAR:
if (tree->gtFlags & GTF_VAR_USEASG)
{
printf("U");
--msgLength;
break;
}
if (tree->gtFlags & GTF_VAR_MULTIREG)
{
printf((tree->gtFlags & GTF_VAR_DEF) ? "M" : "m");
--msgLength;
break;
}
if (tree->gtFlags & GTF_VAR_DEF)
{
printf("D");
--msgLength;
break;
}
if (tree->gtFlags & GTF_VAR_CAST)
{
printf("C");
--msgLength;
break;
}
if (tree->gtFlags & GTF_VAR_ARR_INDEX)
{
printf("i");
--msgLength;
break;
}
if (tree->gtFlags & GTF_VAR_CONTEXT)
{
printf("!");
--msgLength;
break;
}
goto DASH;
case GT_EQ:
case GT_NE:
case GT_LT:
case GT_LE:
case GT_GE:
case GT_GT:
case GT_TEST_EQ:
case GT_TEST_NE:
if (tree->gtFlags & GTF_RELOP_NAN_UN)
{
printf("N");
--msgLength;
break;
}
if (tree->gtFlags & GTF_RELOP_JMP_USED)
{
printf("J");
--msgLength;
break;
}
goto DASH;
case GT_JCMP:
printf((tree->gtFlags & GTF_JCMP_TST) ? "T" : "C");
printf((tree->gtFlags & GTF_JCMP_EQ) ? "EQ" : "NE");
goto DASH;
case GT_CNS_INT:
if (tree->IsIconHandle())
{
if ((tree->gtFlags & GTF_ICON_INITCLASS) != 0)
{
printf("I"); // Static Field handle with INITCLASS requirement
--msgLength;
break;
}
else if ((tree->gtFlags & GTF_ICON_FIELD_OFF) != 0)
{
printf("O");
--msgLength;
break;
}
else
{
// Some other handle
printf("H");
--msgLength;
break;
}
}
goto DASH;
default:
DASH:
printf("-");
--msgLength;
break;
}
/* Then print the general purpose flags */
GenTreeFlags flags = tree->gtFlags;
if (tree->OperIsBinary() || tree->OperIsMultiOp())
{
genTreeOps oper = tree->OperGet();
// Check for GTF_ADDRMODE_NO_CSE flag on add/mul/shl Binary Operators
if ((oper == GT_ADD) || (oper == GT_MUL) || (oper == GT_LSH))
{
if ((tree->gtFlags & GTF_ADDRMODE_NO_CSE) != 0)
{
flags |= GTF_DONT_CSE; // Force the GTF_ADDRMODE_NO_CSE flag to print out like GTF_DONT_CSE
}
}
}
else // !(tree->OperIsBinary() || tree->OperIsMultiOp())
{
// The GTF_REVERSE_OPS flag only applies to binary operations (which some MultiOp nodes are).
flags &= ~GTF_REVERSE_OPS; // we use this value for GTF_VAR_ARR_INDEX above
}
msgLength -= GenTree::gtDispFlags(flags, tree->gtDebugFlags);
/*
printf("%c", (flags & GTF_ASG ) ? 'A' : '-');
printf("%c", (flags & GTF_CALL ) ? 'C' : '-');
printf("%c", (flags & GTF_EXCEPT ) ? 'X' : '-');
printf("%c", (flags & GTF_GLOB_REF ) ? 'G' : '-');
printf("%c", (flags & GTF_ORDER_SIDEEFF ) ? 'O' : '-');
printf("%c", (flags & GTF_COLON_COND ) ? '?' : '-');
printf("%c", (flags & GTF_DONT_CSE ) ? 'N' : // N is for No cse
(flags & GTF_MAKE_CSE ) ? 'H' : '-'); // H is for Hoist this expr
printf("%c", (flags & GTF_REVERSE_OPS ) ? 'R' : '-');
printf("%c", (flags & GTF_UNSIGNED ) ? 'U' :
(flags & GTF_BOOLEAN ) ? 'B' : '-');
printf("%c", (flags & GTF_SET_FLAGS ) ? 'S' : '-');
printf("%c", (flags & GTF_SPILLED ) ? 'z' : '-');
printf("%c", (flags & GTF_SPILL ) ? 'Z' : '-');
*/
}
// If we're printing a node for LIR, we use the space normally associated with the message
// to display the node's temp name (if any)
const bool hasOperands = tree->OperandsBegin() != tree->OperandsEnd();
if (isLIR)
{
assert(msg == nullptr);
// If the tree does not have any operands, we do not display the indent stack. This gives us
// two additional characters for alignment.
if (!hasOperands)
{
msgLength += 1;
}
if (tree->IsValue())
{
const size_t bufLength = msgLength - 1;
msg = reinterpret_cast<char*>(_alloca(bufLength * sizeof(char)));
sprintf_s(const_cast<char*>(msg), bufLength, "t%d = %s", tree->gtTreeID, hasOperands ? "" : " ");
}
}
/* print the msg associated with the node */
if (msg == nullptr)
{
msg = "";
}
if (msgLength < 0)
{
msgLength = 0;
}
printf(isLIR ? " %+*s" : " %-*s", msgLength, msg);
/* Indent the node accordingly */
if (!isLIR || hasOperands)
{
printIndent(indentStack);
}
gtDispNodeName(tree);
assert(tree == nullptr || tree->gtOper < GT_COUNT);
if (tree)
{
/* print the type of the node */
if (tree->gtOper != GT_CAST)
{
printf(" %-6s", varTypeName(tree->TypeGet()));
if (varTypeIsStruct(tree->TypeGet()))
{
ClassLayout* layout = nullptr;
if (tree->OperIs(GT_BLK, GT_OBJ, GT_STORE_BLK, GT_STORE_OBJ))
{
layout = tree->AsBlk()->GetLayout();
}
else if (tree->OperIs(GT_LCL_VAR, GT_STORE_LCL_VAR))
{
LclVarDsc* varDsc = lvaGetDesc(tree->AsLclVar());
if (varTypeIsStruct(varDsc->TypeGet()))
{
layout = varDsc->GetLayout();
}
}
if (layout != nullptr)
{
gtDispClassLayout(layout, tree->TypeGet());
}
}
if (tree->gtOper == GT_LCL_VAR || tree->gtOper == GT_STORE_LCL_VAR)
{
LclVarDsc* varDsc = lvaGetDesc(tree->AsLclVarCommon());
if (varDsc->IsAddressExposed())
{
printf("(AX)"); // Variable has address exposed.
}
if (varDsc->lvUnusedStruct)
{
assert(varDsc->lvPromoted);
printf("(U)"); // Unused struct
}
else if (varDsc->lvPromoted)
{
if (varTypeIsPromotable(varDsc))
{
printf("(P)"); // Promoted struct
}
else
{
// Promoted implicit by-refs can have this state during
// global morph while they are being rewritten
printf("(P?!)"); // Promoted struct
}
}
}
if (tree->IsArgPlaceHolderNode() && (tree->AsArgPlace()->gtArgPlaceClsHnd != nullptr))
{
printf(" => [clsHnd=%08X]", dspPtr(tree->AsArgPlace()->gtArgPlaceClsHnd));
}
if (tree->gtOper == GT_RUNTIMELOOKUP)
{
#ifdef TARGET_64BIT
printf(" 0x%llx", dspPtr(tree->AsRuntimeLookup()->gtHnd));
#else
printf(" 0x%x", dspPtr(tree->AsRuntimeLookup()->gtHnd));
#endif
switch (tree->AsRuntimeLookup()->gtHndType)
{
case CORINFO_HANDLETYPE_CLASS:
printf(" class");
break;
case CORINFO_HANDLETYPE_METHOD:
printf(" method");
break;
case CORINFO_HANDLETYPE_FIELD:
printf(" field");
break;
default:
printf(" unknown");
break;
}
}
}
// for tracking down problems in reguse prediction or liveness tracking
if (verbose && 0)
{
printf(" RR=");
dspRegMask(tree->gtRsvdRegs);
printf("\n");
}
}
}
#if FEATURE_MULTIREG_RET
//----------------------------------------------------------------------------------
// gtDispMultiRegCount: determine how many registers to print for a multi-reg node
//
// Arguments:
// tree - GenTree node whose registers we want to print
//
// Return Value:
// The number of registers to print
//
// Notes:
// This is not the same in all cases as GenTree::GetMultiRegCount().
// In particular, for COPY or RELOAD it only returns the number of *valid* registers,
// and for CALL, it will return 0 if the ReturnTypeDesc hasn't yet been initialized.
// But we want to print all register positions.
//
unsigned Compiler::gtDispMultiRegCount(GenTree* tree)
{
if (tree->IsCopyOrReload())
{
// GetRegCount() will return only the number of valid regs for COPY or RELOAD,
// but we want to print all positions, so we get the reg count for op1.
return gtDispMultiRegCount(tree->gtGetOp1());
}
else if (!tree->IsMultiRegNode())
{
// We can wind up here because IsMultiRegNode() always returns true for COPY or RELOAD,
// even if its op1 is not multireg.
// Note that this method won't be called for non-register-producing nodes.
return 1;
}
else if (tree->OperIs(GT_CALL))
{
unsigned regCount = tree->AsCall()->GetReturnTypeDesc()->TryGetReturnRegCount();
// If it hasn't yet been initialized, we'd still like to see the registers printed.
if (regCount == 0)
{
regCount = MAX_RET_REG_COUNT;
}
return regCount;
}
else
{
return tree->GetMultiRegCount(this);
}
}
#endif // FEATURE_MULTIREG_RET
//----------------------------------------------------------------------------------
// gtDispRegVal: Print the register(s) defined by the given node
//
// Arguments:
// tree - Gentree node whose registers we want to print
//
void Compiler::gtDispRegVal(GenTree* tree)
{
switch (tree->GetRegTag())
{
// Don't display anything for the GT_REGTAG_NONE case;
// the absence of printed register values will imply this state.
case GenTree::GT_REGTAG_REG:
printf(" REG %s", compRegVarName(tree->GetRegNum()));
break;
default:
return;
}
#if FEATURE_MULTIREG_RET
if (tree->IsMultiRegNode())
{
// 0th reg is GetRegNum(), which is already printed above.
// Print the remaining regs of a multi-reg node.
unsigned regCount = gtDispMultiRegCount(tree);
// For some nodes, e.g. COPY, RELOAD or CALL, we may not have valid regs for all positions.
for (unsigned i = 1; i < regCount; ++i)
{
regNumber reg = tree->GetRegByIndex(i);
printf(",%s", genIsValidReg(reg) ? compRegVarName(reg) : "NA");
}
}
#endif
}
// We usually don't expect to print anything longer than this string.
#define LONGEST_COMMON_LCL_VAR_DISPLAY "V99 PInvokeFrame"
#define LONGEST_COMMON_LCL_VAR_DISPLAY_LENGTH (sizeof(LONGEST_COMMON_LCL_VAR_DISPLAY))
#define BUF_SIZE (LONGEST_COMMON_LCL_VAR_DISPLAY_LENGTH * 2)
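//------------------------------------------------------------------------
// gtGetLclVarNameInfo: Recover the IL-level kind ("arg"/"loc"/"tmp"/...), name, and number
// of a local variable, for use in debug output.
//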
void Compiler::gtGetLclVarNameInfo(unsigned lclNum, const char** ilKindOut, const char** ilNameOut, unsigned* ilNumOut)
{
const char* ilKind = nullptr;
const char* ilName = nullptr;
unsigned ilNum = compMap2ILvarNum(lclNum);
if (ilNum == (unsigned)ICorDebugInfo::RETBUF_ILNUM)
{
ilName = "RetBuf";
}
else if (ilNum == (unsigned)ICorDebugInfo::VARARGS_HND_ILNUM)
{
ilName = "VarArgHandle";
}
else if (ilNum == (unsigned)ICorDebugInfo::TYPECTXT_ILNUM)
{
ilName = "TypeCtx";
}
else if (ilNum == (unsigned)ICorDebugInfo::UNKNOWN_ILNUM)
{
if (lclNumIsTrueCSE(lclNum))
{
ilKind = "cse";
ilNum = lclNum - optCSEstart;
}
else if (lclNum >= optCSEstart)
{
// Currently any new LclVars introduced after the CSE phase
// are believed to be created by the "rationalizer"; that is what is meant by the "rat" prefix.
ilKind = "rat";
ilNum = lclNum - (optCSEstart + optCSEcount);
}
else
{
if (lclNum == info.compLvFrameListRoot)
{
ilName = "FramesRoot";
}
else if (lclNum == lvaInlinedPInvokeFrameVar)
{
ilName = "PInvokeFrame";
}
else if (lclNum == lvaGSSecurityCookie)
{
ilName = "GsCookie";
}
else if (lclNum == lvaRetAddrVar)
{
ilName = "ReturnAddress";
}
#if FEATURE_FIXED_OUT_ARGS
else if (lclNum == lvaPInvokeFrameRegSaveVar)
{
ilName = "PInvokeFrameRegSave";
}
else if (lclNum == lvaOutgoingArgSpaceVar)
{
ilName = "OutArgs";
}
#endif // FEATURE_FIXED_OUT_ARGS
#if !defined(FEATURE_EH_FUNCLETS)
else if (lclNum == lvaShadowSPslotsVar)
{
ilName = "EHSlots";
}
#endif // !FEATURE_EH_FUNCLETS
#ifdef JIT32_GCENCODER
else if (lclNum == lvaLocAllocSPvar)
{
ilName = "LocAllocSP";
}
#endif // JIT32_GCENCODER
#if defined(FEATURE_EH_FUNCLETS)
else if (lclNum == lvaPSPSym)
{
ilName = "PSPSym";
}
#endif // FEATURE_EH_FUNCLETS
else
{
ilKind = "tmp";
if (compIsForInlining())
{
ilNum = lclNum - impInlineInfo->InlinerCompiler->info.compLocalsCount;
}
else
{
ilNum = lclNum - info.compLocalsCount;
}
}
}
}
else if (lclNum < (compIsForInlining() ? impInlineInfo->InlinerCompiler->info.compArgsCount : info.compArgsCount))
{
if (ilNum == 0 && !info.compIsStatic)
{
ilName = "this";
}
else
{
ilKind = "arg";
}
}
else
{
if (!lvaTable[lclNum].lvIsStructField)
{
ilKind = "loc";
}
if (compIsForInlining())
{
ilNum -= impInlineInfo->InlinerCompiler->info.compILargsCount;
}
else
{
ilNum -= info.compILargsCount;
}
}
*ilKindOut = ilKind;
*ilNameOut = ilName;
*ilNumOut = ilNum;
}
/*****************************************************************************/
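// gtGetLclVarName: Format "V##" plus any IL name/kind info into 'buf'; returns the number of
// characters printed, or a negative value on failure.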
int Compiler::gtGetLclVarName(unsigned lclNum, char* buf, unsigned buf_remaining)
{
char* bufp_next = buf;
unsigned charsPrinted = 0;
int sprintf_result;
sprintf_result = sprintf_s(bufp_next, buf_remaining, "V%02u", lclNum);
if (sprintf_result < 0)
{
return sprintf_result;
}
charsPrinted += sprintf_result;
bufp_next += sprintf_result;
buf_remaining -= sprintf_result;
const char* ilKind = nullptr;
const char* ilName = nullptr;
unsigned ilNum = 0;
gtGetLclVarNameInfo(lclNum, &ilKind, &ilName, &ilNum);
if (ilName != nullptr)
{
sprintf_result = sprintf_s(bufp_next, buf_remaining, " %s", ilName);
if (sprintf_result < 0)
{
return sprintf_result;
}
charsPrinted += sprintf_result;
bufp_next += sprintf_result;
buf_remaining -= sprintf_result;
}
else if (ilKind != nullptr)
{
sprintf_result = sprintf_s(bufp_next, buf_remaining, " %s%d", ilKind, ilNum);
if (sprintf_result < 0)
{
return sprintf_result;
}
charsPrinted += sprintf_result;
bufp_next += sprintf_result;
buf_remaining -= sprintf_result;
}
assert(charsPrinted > 0);
assert(buf_remaining > 0);
return (int)charsPrinted;
}
/*****************************************************************************
* Get the local var name, and create a copy of the string that can be used in debug output.
*/
char* Compiler::gtGetLclVarName(unsigned lclNum)
{
char buf[BUF_SIZE];
int charsPrinted = gtGetLclVarName(lclNum, buf, ArrLen(buf));
if (charsPrinted < 0)
{
return nullptr;
}
char* retBuf = new (this, CMK_DebugOnly) char[charsPrinted + 1];
strcpy_s(retBuf, charsPrinted + 1, buf);
return retBuf;
}
/*****************************************************************************/
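// gtDispLclVar: Print the name of the given local variable, optionally padded so that columns line up.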
void Compiler::gtDispLclVar(unsigned lclNum, bool padForBiggestDisp)
{
char buf[BUF_SIZE];
int charsPrinted = gtGetLclVarName(lclNum, buf, ArrLen(buf));
if (charsPrinted < 0)
{
return;
}
printf("%s", buf);
if (padForBiggestDisp && (charsPrinted < (int)LONGEST_COMMON_LCL_VAR_DISPLAY_LENGTH))
{
printf("%*c", LONGEST_COMMON_LCL_VAR_DISPLAY_LENGTH - charsPrinted, ' ');
}
}
//------------------------------------------------------------------------
// gtDispLclVarStructType: Print size and type information about a struct or lclBlk local variable.
//
// Arguments:
// lclNum - The local var id.
//
void Compiler::gtDispLclVarStructType(unsigned lclNum)
{
LclVarDsc* varDsc = lvaGetDesc(lclNum);
var_types type = varDsc->TypeGet();
if (type == TYP_STRUCT)
{
ClassLayout* layout = varDsc->GetLayout();
assert(layout != nullptr);
gtDispClassLayout(layout, type);
}
else if (type == TYP_LCLBLK)
{
#if FEATURE_FIXED_OUT_ARGS
assert(lclNum == lvaOutgoingArgSpaceVar);
// Since lvaOutgoingArgSpaceSize is a PhasedVar we can't read it for Dumping until
// after we set it to something.
if (lvaOutgoingArgSpaceSize.HasFinalValue())
{
// A PhasedVar<T> can't be directly used as an arg to a variadic function
unsigned value = lvaOutgoingArgSpaceSize;
printf("<%u> ", value);
}
else
{
printf("<na> "); // The value hasn't yet been determined
}
#else
assert(!"Unknown size");
NO_WAY("Target doesn't support TYP_LCLBLK");
#endif // FEATURE_FIXED_OUT_ARGS
}
}
//------------------------------------------------------------------------
// gtDispClassLayout: Print size and type information about a layout.
//
// Arguments:
// layout - the layout;
// type - variable type, used to avoid printing size for SIMD nodes.
//
void Compiler::gtDispClassLayout(ClassLayout* layout, var_types type)
{
assert(layout != nullptr);
if (layout->IsBlockLayout())
{
printf("<%u>", layout->GetSize());
}
else if (varTypeIsSIMD(type))
{
printf("<%s>", layout->GetClassName());
}
else
{
printf("<%s, %u>", layout->GetClassName(), layout->GetSize());
}
}
/*****************************************************************************/
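// gtDispConst: Print the value (and any handle kind) of a constant node.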
void Compiler::gtDispConst(GenTree* tree)
{
assert(tree->OperIsConst());
switch (tree->gtOper)
{
case GT_CNS_INT:
if (tree->IsIconHandle(GTF_ICON_STR_HDL))
{
const WCHAR* str = eeGetCPString(tree->AsIntCon()->gtIconVal);
// If *str points to a '\0' then don't print the string's values
if ((str != nullptr) && (*str != '\0'))
{
printf(" 0x%X \"%S\"", dspPtr(tree->AsIntCon()->gtIconVal), str);
}
else // We can't print the value of the string
{
// Note that eeGetCPString isn't currently implemented on Linux/ARM
// and instead always returns nullptr
printf(" 0x%X [ICON_STR_HDL]", dspPtr(tree->AsIntCon()->gtIconVal));
}
}
else
{
ssize_t dspIconVal =
tree->IsIconHandle() ? dspPtr(tree->AsIntCon()->gtIconVal) : tree->AsIntCon()->gtIconVal;
if (tree->TypeGet() == TYP_REF)
{
assert(tree->AsIntCon()->gtIconVal == 0);
printf(" null");
}
else if ((tree->AsIntCon()->gtIconVal > -1000) && (tree->AsIntCon()->gtIconVal < 1000))
{
printf(" %ld", dspIconVal);
}
#ifdef TARGET_64BIT
else if ((tree->AsIntCon()->gtIconVal & 0xFFFFFFFF00000000LL) != 0)
{
if (dspIconVal >= 0)
{
printf(" 0x%llx", dspIconVal);
}
else
{
printf(" -0x%llx", -dspIconVal);
}
}
#endif
else
{
if (dspIconVal >= 0)
{
printf(" 0x%X", dspIconVal);
}
else
{
printf(" -0x%X", -dspIconVal);
}
}
if (tree->IsIconHandle())
{
switch (tree->GetIconHandleFlag())
{
case GTF_ICON_SCOPE_HDL:
printf(" scope");
break;
case GTF_ICON_CLASS_HDL:
printf(" class");
break;
case GTF_ICON_METHOD_HDL:
printf(" method");
break;
case GTF_ICON_FIELD_HDL:
printf(" field");
break;
case GTF_ICON_STATIC_HDL:
printf(" static");
break;
case GTF_ICON_STR_HDL:
unreached(); // This case is handled above
break;
case GTF_ICON_CONST_PTR:
printf(" const ptr");
break;
case GTF_ICON_GLOBAL_PTR:
printf(" global ptr");
break;
case GTF_ICON_VARG_HDL:
printf(" vararg");
break;
case GTF_ICON_PINVKI_HDL:
printf(" pinvoke");
break;
case GTF_ICON_TOKEN_HDL:
printf(" token");
break;
case GTF_ICON_TLS_HDL:
printf(" tls");
break;
case GTF_ICON_FTN_ADDR:
printf(" ftn");
break;
case GTF_ICON_CIDMID_HDL:
printf(" cid/mid");
break;
case GTF_ICON_BBC_PTR:
printf(" bbc");
break;
case GTF_ICON_STATIC_BOX_PTR:
printf(" static box ptr");
break;
default:
printf(" UNKNOWN");
break;
}
}
if ((tree->gtFlags & GTF_ICON_FIELD_OFF) != 0)
{
printf(" field offset");
}
#ifdef FEATURE_SIMD
if ((tree->gtFlags & GTF_ICON_SIMD_COUNT) != 0)
{
printf(" vector element count");
}
#endif
if ((tree->IsReuseRegVal()) != 0)
{
printf(" reuse reg val");
}
}
gtDispFieldSeq(tree->AsIntCon()->gtFieldSeq);
break;
case GT_CNS_LNG:
printf(" 0x%016I64x", tree->AsLngCon()->gtLconVal);
break;
case GT_CNS_DBL:
if (*((__int64*)&tree->AsDblCon()->gtDconVal) == (__int64)I64(0x8000000000000000))
{
printf(" -0.00000");
}
else
{
printf(" %#.17g", tree->AsDblCon()->gtDconVal);
}
break;
case GT_CNS_STR:
printf("<string constant>");
break;
default:
assert(!"unexpected constant node");
}
}
//------------------------------------------------------------------------
// gtDispFieldSeq: "gtDispFieldSeq" that also prints "<NotAField>".
//
// Useful for printing zero-offset field sequences.
//
void Compiler::gtDispAnyFieldSeq(FieldSeqNode* fieldSeq)
{
if (fieldSeq == FieldSeqStore::NotAField())
{
printf(" Fseq<NotAField>");
return;
}
gtDispFieldSeq(fieldSeq);
}
//------------------------------------------------------------------------
// gtDispFieldSeq: Print out the fields in this field sequence.
//
void Compiler::gtDispFieldSeq(FieldSeqNode* pfsn)
{
if ((pfsn == nullptr) || (pfsn == FieldSeqStore::NotAField()))
{
return;
}
// Otherwise...
printf(" Fseq[");
while (pfsn != nullptr)
{
assert(pfsn != FieldSeqStore::NotAField()); // Can't exist in a field sequence list except alone
CORINFO_FIELD_HANDLE fldHnd = pfsn->m_fieldHnd;
// First check the "pseudo" field handles...
if (fldHnd == FieldSeqStore::FirstElemPseudoField)
{
printf("#FirstElem");
}
else if (fldHnd == FieldSeqStore::ConstantIndexPseudoField)
{
printf("#ConstantIndex");
}
else
{
printf("%s", eeGetFieldName(fldHnd));
}
pfsn = pfsn->m_next;
if (pfsn != nullptr)
{
printf(", ");
}
}
printf("]");
}
//------------------------------------------------------------------------
// gtDispLeaf: Print a single leaf node to jitstdout.
//
// Arguments:
// tree - the tree to be printed
// indentStack - the specification for the current level of indentation & arcs
//
// Return Value:
// None.
//
// Notes:
// 'indentStack' may be null, in which case no indentation or arcs are printed
void Compiler::gtDispLeaf(GenTree* tree, IndentStack* indentStack)
{
if (tree->OperIsConst())
{
gtDispConst(tree);
return;
}
bool isLclFld = false;
switch (tree->gtOper)
{
case GT_LCL_FLD:
case GT_LCL_FLD_ADDR:
case GT_STORE_LCL_FLD:
isLclFld = true;
FALLTHROUGH;
case GT_PHI_ARG:
case GT_LCL_VAR:
case GT_LCL_VAR_ADDR:
case GT_STORE_LCL_VAR:
{
printf(" ");
const unsigned varNum = tree->AsLclVarCommon()->GetLclNum();
const LclVarDsc* varDsc = lvaGetDesc(varNum);
gtDispLclVar(varNum);
if (tree->AsLclVarCommon()->HasSsaName())
{
if (tree->gtFlags & GTF_VAR_USEASG)
{
assert(tree->gtFlags & GTF_VAR_DEF);
printf("ud:%d->%d", tree->AsLclVarCommon()->GetSsaNum(), GetSsaNumForLocalVarDef(tree));
}
else
{
printf("%s:%d", (tree->gtFlags & GTF_VAR_DEF) ? "d" : "u", tree->AsLclVarCommon()->GetSsaNum());
}
}
if (isLclFld)
{
printf("[+%u]", tree->AsLclFld()->GetLclOffs());
gtDispFieldSeq(tree->AsLclFld()->GetFieldSeq());
}
if (varDsc->lvRegister)
{
printf(" ");
varDsc->PrintVarReg();
}
else if (tree->InReg())
{
printf(" %s", compRegVarName(tree->GetRegNum()));
}
if (varDsc->lvPromoted)
{
if (!varTypeIsPromotable(varDsc) && !varDsc->lvUnusedStruct)
{
// Promoted implicit byrefs can get in this state while they are being rewritten
// in global morph.
}
else
{
for (unsigned i = varDsc->lvFieldLclStart; i < varDsc->lvFieldLclStart + varDsc->lvFieldCnt; ++i)
{
LclVarDsc* fieldVarDsc = lvaGetDesc(i);
const char* fieldName;
#if !defined(TARGET_64BIT)
if (varTypeIsLong(varDsc))
{
fieldName = (i == 0) ? "lo" : "hi";
}
else
#endif // !defined(TARGET_64BIT)
{
CORINFO_CLASS_HANDLE typeHnd = varDsc->GetStructHnd();
CORINFO_FIELD_HANDLE fldHnd =
info.compCompHnd->getFieldInClass(typeHnd, fieldVarDsc->lvFldOrdinal);
fieldName = eeGetFieldName(fldHnd);
}
printf("\n");
printf(" ");
printIndent(indentStack);
printf(" %-6s V%02u.%s (offs=0x%02x) -> ", varTypeName(fieldVarDsc->TypeGet()),
tree->AsLclVarCommon()->GetLclNum(), fieldName, fieldVarDsc->lvFldOffset);
gtDispLclVar(i);
if (fieldVarDsc->lvRegister)
{
printf(" ");
fieldVarDsc->PrintVarReg();
}
if (fieldVarDsc->lvTracked && fgLocalVarLivenessDone && tree->IsMultiRegLclVar() &&
tree->AsLclVar()->IsLastUse(i - varDsc->lvFieldLclStart))
{
printf(" (last use)");
}
}
}
}
else // a normal not-promoted lclvar
{
if (varDsc->lvTracked && fgLocalVarLivenessDone && ((tree->gtFlags & GTF_VAR_DEATH) != 0))
{
printf(" (last use)");
}
}
}
break;
case GT_JMP:
{
const char* methodName;
const char* className;
methodName = eeGetMethodName((CORINFO_METHOD_HANDLE)tree->AsVal()->gtVal1, &className);
printf(" %s.%s\n", className, methodName);
}
break;
case GT_CLS_VAR:
printf(" Hnd=%#x", dspPtr(tree->AsClsVar()->gtClsVarHnd));
gtDispFieldSeq(tree->AsClsVar()->gtFieldSeq);
break;
case GT_CLS_VAR_ADDR:
printf(" Hnd=%#x", dspPtr(tree->AsClsVar()->gtClsVarHnd));
break;
case GT_LABEL:
break;
case GT_FTN_ADDR:
{
const char* methodName;
const char* className;
methodName = eeGetMethodName((CORINFO_METHOD_HANDLE)tree->AsFptrVal()->gtFptrMethod, &className);
printf(" %s.%s\n", className, methodName);
}
break;
#if !defined(FEATURE_EH_FUNCLETS)
case GT_END_LFIN:
printf(" endNstLvl=%d", tree->AsVal()->gtVal1);
break;
#endif // !FEATURE_EH_FUNCLETS
// Vanilla leaves. No qualifying information available. So do nothing
case GT_NO_OP:
case GT_START_NONGC:
case GT_START_PREEMPTGC:
case GT_PROF_HOOK:
case GT_CATCH_ARG:
case GT_MEMORYBARRIER:
case GT_ARGPLACE:
case GT_PINVOKE_PROLOG:
case GT_JMPTABLE:
break;
case GT_RET_EXPR:
{
GenTree* const associatedTree = tree->AsRetExpr()->gtInlineCandidate;
printf("(inl return %s ", tree->IsCall() ? " from call" : "expr");
printTreeID(associatedTree);
printf(")");
}
break;
case GT_PHYSREG:
printf(" %s", getRegName(tree->AsPhysReg()->gtSrcReg));
break;
case GT_IL_OFFSET:
printf(" ");
tree->AsILOffset()->gtStmtDI.Dump(true);
break;
case GT_JCC:
case GT_SETCC:
printf(" cond=%s", tree->AsCC()->gtCondition.Name());
break;
case GT_JCMP:
printf(" cond=%s%s", (tree->gtFlags & GTF_JCMP_TST) ? "TEST_" : "",
(tree->gtFlags & GTF_JCMP_EQ) ? "EQ" : "NE");
break;
default:
assert(!"don't know how to display tree leaf node");
}
}
//------------------------------------------------------------------------
// gtDispChild: Print a child node to jitstdout.
//
// Arguments:
// tree - the tree to be printed
// indentStack - the specification for the current level of indentation & arcs
// arcType - the type of arc to use for this child
// msg - a contextual method (i.e. from the parent) to print
// topOnly - a boolean indicating whether to print the children, or just the top node
//
// Return Value:
// None.
//
// Notes:
// 'indentStack' may be null, in which case no indentation or arcs are printed
// 'msg' has a default value of null
// 'topOnly' is an optional argument that defaults to false
void Compiler::gtDispChild(GenTree* child,
IndentStack* indentStack,
IndentInfo arcType,
_In_opt_ const char* msg, /* = nullptr */
bool topOnly) /* = false */
{
indentStack->Push(arcType);
gtDispTree(child, indentStack, msg, topOnly);
indentStack->Pop();
}
#ifdef FEATURE_SIMD
// Intrinsic Id to name map
extern const char* const simdIntrinsicNames[] = {
#define SIMD_INTRINSIC(mname, inst, id, name, r, ac, arg1, arg2, arg3, t1, t2, t3, t4, t5, t6, t7, t8, t9, t10) name,
#include "simdintrinsiclist.h"
};
#endif // FEATURE_SIMD
/*****************************************************************************/
void Compiler::gtDispTree(GenTree* tree,
IndentStack* indentStack, /* = nullptr */
_In_ _In_opt_z_ const char* msg, /* = nullptr */
bool topOnly, /* = false */
bool isLIR) /* = false */
{
if (tree == nullptr)
{
printf(" [%08X] <NULL>\n", tree);
printf(""); // null string means flush
return;
}
if (indentStack == nullptr)
{
indentStack = new (this, CMK_DebugOnly) IndentStack(this);
}
if (IsUninitialized(tree))
{
/* Value used to initialize nodes */
printf("Uninitialized tree node!\n");
return;
}
if (tree->gtOper >= GT_COUNT)
{
gtDispNode(tree, indentStack, msg, isLIR);
printf("Bogus operator!\n");
return;
}
/* Is tree a leaf node? */
if (tree->OperIsLeaf() || tree->OperIsLocalStore()) // local stores used to be leaves
{
gtDispNode(tree, indentStack, msg, isLIR);
gtDispLeaf(tree, indentStack);
gtDispCommonEndLine(tree);
if (tree->OperIsLocalStore() && !topOnly)
{
gtDispChild(tree->AsOp()->gtOp1, indentStack, IINone);
}
return;
}
// Determine what kind of arc to propagate.
IndentInfo myArc = IINone;
IndentInfo lowerArc = IINone;
if (indentStack->Depth() > 0)
{
myArc = indentStack->Pop();
switch (myArc)
{
case IIArcBottom:
indentStack->Push(IIArc);
lowerArc = IINone;
break;
case IIArc:
indentStack->Push(IIArc);
lowerArc = IIArc;
break;
case IIArcTop:
indentStack->Push(IINone);
lowerArc = IIArc;
break;
case IINone:
indentStack->Push(IINone);
lowerArc = IINone;
break;
default:
unreached();
break;
}
}
/* Is it a 'simple' unary/binary operator? */
const char* childMsg = nullptr;
if (tree->OperIsSimple())
{
// Now, get the right type of arc for this node
if (myArc != IINone)
{
indentStack->Pop();
indentStack->Push(myArc);
}
gtDispNode(tree, indentStack, msg, isLIR);
// Propagate lowerArc to the lower children.
if (indentStack->Depth() > 0)
{
(void)indentStack->Pop();
indentStack->Push(lowerArc);
}
if (tree->gtOper == GT_CAST)
{
/* Format a message that explains the effect of this GT_CAST */
var_types fromType = genActualType(tree->AsCast()->CastOp()->TypeGet());
var_types toType = tree->CastToType();
var_types finalType = tree->TypeGet();
/* if GTF_UNSIGNED is set then force fromType to an unsigned type */
if (tree->gtFlags & GTF_UNSIGNED)
{
fromType = varTypeToUnsigned(fromType);
}
if (finalType != toType)
{
printf(" %s <-", varTypeName(finalType));
}
printf(" %s <- %s", varTypeName(toType), varTypeName(fromType));
}
if (tree->OperIsBlkOp())
{
if (tree->OperIsCopyBlkOp())
{
printf(" (copy)");
}
else if (tree->OperIsInitBlkOp())
{
printf(" (init)");
}
if (tree->OperIsStoreBlk() && (tree->AsBlk()->gtBlkOpKind != GenTreeBlk::BlkOpKindInvalid))
{
switch (tree->AsBlk()->gtBlkOpKind)
{
#ifdef TARGET_XARCH
case GenTreeBlk::BlkOpKindRepInstr:
printf(" (RepInstr)");
break;
#endif
case GenTreeBlk::BlkOpKindUnroll:
printf(" (Unroll)");
break;
#ifndef TARGET_X86
case GenTreeBlk::BlkOpKindHelper:
printf(" (Helper)");
break;
#endif
default:
unreached();
}
}
}
#if FEATURE_PUT_STRUCT_ARG_STK
else if (tree->OperGet() == GT_PUTARG_STK)
{
const GenTreePutArgStk* putArg = tree->AsPutArgStk();
#if !defined(DEBUG_ARG_SLOTS)
printf(" (%d stackByteSize), (%d byteOffset)", putArg->GetStackByteSize(), putArg->getArgOffset());
#else
if (compMacOsArm64Abi())
{
printf(" (%d stackByteSize), (%d byteOffset)", putArg->GetStackByteSize(), putArg->getArgOffset());
}
else
{
printf(" (%d slots), (%d stackByteSize), (%d slot), (%d byteOffset)", putArg->gtNumSlots,
putArg->GetStackByteSize(), putArg->gtSlotNum, putArg->getArgOffset());
}
#endif
if (putArg->gtPutArgStkKind != GenTreePutArgStk::Kind::Invalid)
{
switch (putArg->gtPutArgStkKind)
{
case GenTreePutArgStk::Kind::RepInstr:
printf(" (RepInstr)");
break;
case GenTreePutArgStk::Kind::PartialRepInstr:
printf(" (PartialRepInstr)");
break;
case GenTreePutArgStk::Kind::Unroll:
printf(" (Unroll)");
break;
case GenTreePutArgStk::Kind::Push:
printf(" (Push)");
break;
case GenTreePutArgStk::Kind::PushAllSlots:
printf(" (PushAllSlots)");
break;
default:
unreached();
}
}
}
#if FEATURE_ARG_SPLIT
else if (tree->OperGet() == GT_PUTARG_SPLIT)
{
const GenTreePutArgSplit* putArg = tree->AsPutArgSplit();
#if !defined(DEBUG_ARG_SLOTS)
printf(" (%d stackByteSize), (%d numRegs)", putArg->GetStackByteSize(), putArg->gtNumRegs);
#else
if (compMacOsArm64Abi())
{
printf(" (%d stackByteSize), (%d numRegs)", putArg->GetStackByteSize(), putArg->gtNumRegs);
}
else
{
printf(" (%d slots), (%d stackByteSize), (%d numRegs)", putArg->gtNumSlots, putArg->GetStackByteSize(),
putArg->gtNumRegs);
}
#endif
}
#endif // FEATURE_ARG_SPLIT
#endif // FEATURE_PUT_STRUCT_ARG_STK
if (tree->OperIs(GT_FIELD))
{
if (FieldSeqStore::IsPseudoField(tree->AsField()->gtFldHnd))
{
printf(" #PseudoField:0x%x", tree->AsField()->gtFldOffset);
}
else
{
printf(" %s", eeGetFieldName(tree->AsField()->gtFldHnd), 0);
}
}
if (tree->gtOper == GT_INTRINSIC)
{
GenTreeIntrinsic* intrinsic = tree->AsIntrinsic();
switch (intrinsic->gtIntrinsicName)
{
case NI_System_Math_Abs:
printf(" abs");
break;
case NI_System_Math_Acos:
printf(" acos");
break;
case NI_System_Math_Acosh:
printf(" acosh");
break;
case NI_System_Math_Asin:
printf(" asin");
break;
case NI_System_Math_Asinh:
printf(" asinh");
break;
case NI_System_Math_Atan:
printf(" atan");
break;
case NI_System_Math_Atanh:
printf(" atanh");
break;
case NI_System_Math_Atan2:
printf(" atan2");
break;
case NI_System_Math_Cbrt:
printf(" cbrt");
break;
case NI_System_Math_Ceiling:
printf(" ceiling");
break;
case NI_System_Math_Cos:
printf(" cos");
break;
case NI_System_Math_Cosh:
printf(" cosh");
break;
case NI_System_Math_Exp:
printf(" exp");
break;
case NI_System_Math_Floor:
printf(" floor");
break;
case NI_System_Math_FMod:
printf(" fmod");
break;
case NI_System_Math_FusedMultiplyAdd:
printf(" fma");
break;
case NI_System_Math_ILogB:
printf(" ilogb");
break;
case NI_System_Math_Log:
printf(" log");
break;
case NI_System_Math_Log2:
printf(" log2");
break;
case NI_System_Math_Log10:
printf(" log10");
break;
case NI_System_Math_Max:
printf(" max");
break;
case NI_System_Math_Min:
printf(" min");
break;
case NI_System_Math_Pow:
printf(" pow");
break;
case NI_System_Math_Round:
printf(" round");
break;
case NI_System_Math_Sin:
printf(" sin");
break;
case NI_System_Math_Sinh:
printf(" sinh");
break;
case NI_System_Math_Sqrt:
printf(" sqrt");
break;
case NI_System_Math_Tan:
printf(" tan");
break;
case NI_System_Math_Tanh:
printf(" tanh");
break;
case NI_System_Math_Truncate:
printf(" truncate");
break;
case NI_System_Object_GetType:
printf(" objGetType");
break;
case NI_System_Runtime_CompilerServices_RuntimeHelpers_IsKnownConstant:
printf(" isKnownConst");
break;
default:
unreached();
}
}
gtDispCommonEndLine(tree);
if (!topOnly)
{
if (tree->AsOp()->gtOp1 != nullptr)
{
// Label the child of the GT_COLON operator
// op1 is the else part
if (tree->gtOper == GT_COLON)
{
childMsg = "else";
}
else if (tree->gtOper == GT_QMARK)
{
childMsg = " if";
}
gtDispChild(tree->AsOp()->gtOp1, indentStack,
(tree->gtGetOp2IfPresent() == nullptr) ? IIArcBottom : IIArc, childMsg, topOnly);
}
if (tree->gtGetOp2IfPresent())
{
// Label the childMsgs of the GT_COLON operator
// op2 is the then part
if (tree->gtOper == GT_COLON)
{
childMsg = "then";
}
gtDispChild(tree->AsOp()->gtOp2, indentStack, IIArcBottom, childMsg, topOnly);
}
}
return;
}
// Now, get the right type of arc for this node
if (myArc != IINone)
{
indentStack->Pop();
indentStack->Push(myArc);
}
gtDispNode(tree, indentStack, msg, isLIR);
// Propagate lowerArc to the lower children.
if (indentStack->Depth() > 0)
{
(void)indentStack->Pop();
indentStack->Push(lowerArc);
}
// See what kind of a special operator we have here, and handle its special children.
switch (tree->gtOper)
{
case GT_FIELD_LIST:
gtDispCommonEndLine(tree);
if (!topOnly)
{
for (GenTreeFieldList::Use& use : tree->AsFieldList()->Uses())
{
char offset[32];
sprintf_s(offset, sizeof(offset), "ofs %u", use.GetOffset());
gtDispChild(use.GetNode(), indentStack, (use.GetNext() == nullptr) ? IIArcBottom : IIArc, offset);
}
}
break;
case GT_PHI:
gtDispCommonEndLine(tree);
if (!topOnly)
{
for (GenTreePhi::Use& use : tree->AsPhi()->Uses())
{
char block[32];
sprintf_s(block, sizeof(block), "pred " FMT_BB, use.GetNode()->AsPhiArg()->gtPredBB->bbNum);
gtDispChild(use.GetNode(), indentStack, (use.GetNext() == nullptr) ? IIArcBottom : IIArc, block);
}
}
break;
case GT_CALL:
{
GenTreeCall* call = tree->AsCall();
GenTree* lastChild = nullptr;
call->VisitOperands([&lastChild](GenTree* operand) -> GenTree::VisitResult {
lastChild = operand;
return GenTree::VisitResult::Continue;
});
if (call->gtCallType != CT_INDIRECT)
{
const char* methodName;
const char* className;
methodName = eeGetMethodName(call->gtCallMethHnd, &className);
printf(" %s.%s", className, methodName);
}
if ((call->gtFlags & GTF_CALL_UNMANAGED) && (call->gtCallMoreFlags & GTF_CALL_M_FRAME_VAR_DEATH))
{
printf(" (FramesRoot last use)");
}
if (((call->gtFlags & GTF_CALL_INLINE_CANDIDATE) != 0) && (call->gtInlineCandidateInfo != nullptr) &&
(call->gtInlineCandidateInfo->exactContextHnd != nullptr))
{
printf(" (exactContextHnd=0x%p)", dspPtr(call->gtInlineCandidateInfo->exactContextHnd));
}
gtDispCommonEndLine(tree);
if (!topOnly)
{
char buf[64];
char* bufp;
bufp = &buf[0];
if ((call->gtCallThisArg != nullptr) && !call->gtCallThisArg->GetNode()->OperIs(GT_NOP, GT_ARGPLACE))
{
if (call->gtCallThisArg->GetNode()->OperIs(GT_ASG))
{
sprintf_s(bufp, sizeof(buf), "this SETUP%c", 0);
}
else
{
sprintf_s(bufp, sizeof(buf), "this in %s%c", compRegVarName(REG_ARG_0), 0);
}
gtDispChild(call->gtCallThisArg->GetNode(), indentStack,
(call->gtCallThisArg->GetNode() == lastChild) ? IIArcBottom : IIArc, bufp, topOnly);
}
if (call->gtCallArgs)
{
gtDispArgList(call, lastChild, indentStack);
}
if (call->gtCallType == CT_INDIRECT)
{
gtDispChild(call->gtCallAddr, indentStack, (call->gtCallAddr == lastChild) ? IIArcBottom : IIArc,
"calli tgt", topOnly);
}
if (call->gtControlExpr != nullptr)
{
gtDispChild(call->gtControlExpr, indentStack,
(call->gtControlExpr == lastChild) ? IIArcBottom : IIArc, "control expr", topOnly);
}
int lateArgIndex = 0;
for (GenTreeCall::Use& use : call->LateArgs())
{
IndentInfo arcType = (use.GetNext() == nullptr) ? IIArcBottom : IIArc;
gtGetLateArgMsg(call, use.GetNode(), lateArgIndex, bufp, sizeof(buf));
gtDispChild(use.GetNode(), indentStack, arcType, bufp, topOnly);
lateArgIndex++;
}
}
}
break;
#if defined(FEATURE_SIMD) || defined(FEATURE_HW_INTRINSICS)
#if defined(FEATURE_SIMD)
case GT_SIMD:
#endif
#if defined(FEATURE_HW_INTRINSICS)
case GT_HWINTRINSIC:
#endif
#if defined(FEATURE_SIMD)
if (tree->OperIs(GT_SIMD))
{
printf(" %s %s", varTypeName(tree->AsSIMD()->GetSimdBaseType()),
simdIntrinsicNames[tree->AsSIMD()->GetSIMDIntrinsicId()]);
}
#endif // defined(FEATURE_SIMD)
#if defined(FEATURE_HW_INTRINSICS)
if (tree->OperIs(GT_HWINTRINSIC))
{
printf(" %s %s", tree->AsHWIntrinsic()->GetSimdBaseType() == TYP_UNKNOWN
? ""
: varTypeName(tree->AsHWIntrinsic()->GetSimdBaseType()),
HWIntrinsicInfo::lookupName(tree->AsHWIntrinsic()->GetHWIntrinsicId()));
}
#endif // defined(FEATURE_HW_INTRINSICS)
gtDispCommonEndLine(tree);
if (!topOnly)
{
size_t index = 0;
size_t count = tree->AsMultiOp()->GetOperandCount();
for (GenTree* operand : tree->AsMultiOp()->Operands())
{
gtDispChild(operand, indentStack, ++index < count ? IIArc : IIArcBottom, nullptr, topOnly);
}
}
break;
#endif // defined(FEATURE_SIMD) || defined(FEATURE_HW_INTRINSICS)
case GT_ARR_ELEM:
gtDispCommonEndLine(tree);
if (!topOnly)
{
gtDispChild(tree->AsArrElem()->gtArrObj, indentStack, IIArc, nullptr, topOnly);
unsigned dim;
for (dim = 0; dim < tree->AsArrElem()->gtArrRank; dim++)
{
IndentInfo arcType = ((dim + 1) == tree->AsArrElem()->gtArrRank) ? IIArcBottom : IIArc;
gtDispChild(tree->AsArrElem()->gtArrInds[dim], indentStack, arcType, nullptr, topOnly);
}
}
break;
case GT_ARR_OFFSET:
gtDispCommonEndLine(tree);
if (!topOnly)
{
gtDispChild(tree->AsArrOffs()->gtOffset, indentStack, IIArc, nullptr, topOnly);
gtDispChild(tree->AsArrOffs()->gtIndex, indentStack, IIArc, nullptr, topOnly);
gtDispChild(tree->AsArrOffs()->gtArrObj, indentStack, IIArcBottom, nullptr, topOnly);
}
break;
case GT_CMPXCHG:
gtDispCommonEndLine(tree);
if (!topOnly)
{
gtDispChild(tree->AsCmpXchg()->gtOpLocation, indentStack, IIArc, nullptr, topOnly);
gtDispChild(tree->AsCmpXchg()->gtOpValue, indentStack, IIArc, nullptr, topOnly);
gtDispChild(tree->AsCmpXchg()->gtOpComparand, indentStack, IIArcBottom, nullptr, topOnly);
}
break;
case GT_STORE_DYN_BLK:
if (tree->OperIsCopyBlkOp())
{
printf(" (copy)");
}
else if (tree->OperIsInitBlkOp())
{
printf(" (init)");
}
gtDispCommonEndLine(tree);
if (!topOnly)
{
gtDispChild(tree->AsStoreDynBlk()->Addr(), indentStack, IIArc, nullptr, topOnly);
if (tree->AsStoreDynBlk()->Data() != nullptr)
{
gtDispChild(tree->AsStoreDynBlk()->Data(), indentStack, IIArc, nullptr, topOnly);
}
gtDispChild(tree->AsStoreDynBlk()->gtDynamicSize, indentStack, IIArcBottom, nullptr, topOnly);
}
break;
default:
printf("<DON'T KNOW HOW TO DISPLAY THIS NODE> :");
printf(""); // null string means flush
break;
}
}
//------------------------------------------------------------------------
// gtGetArgMsg: Construct a message about the given argument
//
// Arguments:
// call - The call for which 'arg' is an argument
// arg - The argument for which a message should be constructed
// argNum - The ordinal number of the arg in the argument list
// bufp - A pointer to the buffer into which the message is written
// bufLength - The length of the buffer pointed to by bufp
//
// Return Value:
// No return value, but bufp is written.
//
// Assumptions:
// 'call' must be a call node
// 'arg' must be an argument to 'call' (else gtArgEntryByNode will assert)
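//
// Notes:
//    Typical messages look like "arg1 SETUP", "arg2 out+10", or "arg0 on STK"
//    (the exact form is target-dependent; these are representative examples only).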
void Compiler::gtGetArgMsg(GenTreeCall* call, GenTree* arg, unsigned argNum, char* bufp, unsigned bufLength)
{
if (call->gtCallLateArgs != nullptr)
{
fgArgTabEntry* curArgTabEntry = gtArgEntryByArgNum(call, argNum);
assert(curArgTabEntry);
if (arg->gtFlags & GTF_LATE_ARG)
{
sprintf_s(bufp, bufLength, "arg%d SETUP%c", argNum, 0);
}
else
{
#ifdef TARGET_ARM
if (curArgTabEntry->IsSplit())
{
regNumber firstReg = curArgTabEntry->GetRegNum();
if (curArgTabEntry->numRegs == 1)
{
sprintf_s(bufp, bufLength, "arg%d %s out+%02x%c", argNum, compRegVarName(firstReg),
(curArgTabEntry->slotNum) * TARGET_POINTER_SIZE, 0);
}
else
{
regNumber lastReg = REG_STK;
char separator = (curArgTabEntry->numRegs == 2) ? ',' : '-';
if (curArgTabEntry->IsHfaRegArg())
{
unsigned lastRegNum = genMapFloatRegNumToRegArgNum(firstReg) + curArgTabEntry->numRegs - 1;
lastReg = genMapFloatRegArgNumToRegNum(lastRegNum);
}
else
{
unsigned lastRegNum = genMapIntRegNumToRegArgNum(firstReg) + curArgTabEntry->numRegs - 1;
lastReg = genMapIntRegArgNumToRegNum(lastRegNum);
}
sprintf_s(bufp, bufLength, "arg%d %s%c%s out+%02x%c", argNum, compRegVarName(firstReg), separator,
compRegVarName(lastReg), (curArgTabEntry->slotNum) * TARGET_POINTER_SIZE, 0);
}
return;
}
#endif // TARGET_ARM
#if FEATURE_FIXED_OUT_ARGS
sprintf_s(bufp, bufLength, "arg%d out+%02x%c", argNum, curArgTabEntry->GetByteOffset(), 0);
#else
sprintf_s(bufp, bufLength, "arg%d on STK%c", argNum, 0);
#endif
}
}
else
{
sprintf_s(bufp, bufLength, "arg%d%c", argNum, 0);
}
}
//------------------------------------------------------------------------
// gtGetLateArgMsg: Construct a message about the given argument
//
// Arguments:
// call - The call for which 'arg' is an argument
// argx - The argument for which a message should be constructed
//    lateArgIndex - The ordinal number of the arg in the late arg list
// bufp - A pointer to the buffer into which the message is written
// bufLength - The length of the buffer pointed to by bufp
//
// Return Value:
// No return value, but bufp is written.
//
// Assumptions:
// 'call' must be a call node
//    'argx' must be an argument to 'call' (else the late arg table lookup will assert)
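//
// Notes:
//    Typical messages look like "this in rcx", "arg2 in rdx", or "arg3 in out+18"
//    (register names are target-specific and are shown here only for illustration).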
void Compiler::gtGetLateArgMsg(GenTreeCall* call, GenTree* argx, int lateArgIndex, char* bufp, unsigned bufLength)
{
    assert(!argx->IsArgPlaceHolderNode()); // No placeholder nodes are in gtCallLateArgs.
fgArgTabEntry* curArgTabEntry = gtArgEntryByLateArgIndex(call, lateArgIndex);
assert(curArgTabEntry);
regNumber argReg = curArgTabEntry->GetRegNum();
#if FEATURE_FIXED_OUT_ARGS
if (argReg == REG_STK)
{
sprintf_s(bufp, bufLength, "arg%d in out+%02x%c", curArgTabEntry->argNum, curArgTabEntry->GetByteOffset(), 0);
}
else
#endif
{
if (curArgTabEntry->use == call->gtCallThisArg)
{
sprintf_s(bufp, bufLength, "this in %s%c", compRegVarName(argReg), 0);
}
#ifdef TARGET_ARM
else if (curArgTabEntry->IsSplit())
{
regNumber firstReg = curArgTabEntry->GetRegNum();
unsigned argNum = curArgTabEntry->argNum;
if (curArgTabEntry->numRegs == 1)
{
sprintf_s(bufp, bufLength, "arg%d %s out+%02x%c", argNum, compRegVarName(firstReg),
(curArgTabEntry->slotNum) * TARGET_POINTER_SIZE, 0);
}
else
{
regNumber lastReg = REG_STK;
char separator = (curArgTabEntry->numRegs == 2) ? ',' : '-';
if (curArgTabEntry->IsHfaRegArg())
{
unsigned lastRegNum = genMapFloatRegNumToRegArgNum(firstReg) + curArgTabEntry->numRegs - 1;
lastReg = genMapFloatRegArgNumToRegNum(lastRegNum);
}
else
{
unsigned lastRegNum = genMapIntRegNumToRegArgNum(firstReg) + curArgTabEntry->numRegs - 1;
lastReg = genMapIntRegArgNumToRegNum(lastRegNum);
}
sprintf_s(bufp, bufLength, "arg%d %s%c%s out+%02x%c", argNum, compRegVarName(firstReg), separator,
compRegVarName(lastReg), (curArgTabEntry->slotNum) * TARGET_POINTER_SIZE, 0);
}
return;
}
#endif // TARGET_ARM
else
{
#if FEATURE_MULTIREG_ARGS
if (curArgTabEntry->numRegs >= 2)
{
char separator = (curArgTabEntry->numRegs == 2) ? ',' : '-';
sprintf_s(bufp, bufLength, "arg%d %s%c%s%c", curArgTabEntry->argNum, compRegVarName(argReg), separator,
compRegVarName(curArgTabEntry->GetRegNum(curArgTabEntry->numRegs - 1)), 0);
}
else
#endif
{
sprintf_s(bufp, bufLength, "arg%d in %s%c", curArgTabEntry->argNum, compRegVarName(argReg), 0);
}
}
}
}
//------------------------------------------------------------------------
// gtDispArgList: Dump the tree for a call arg list
//
// Arguments:
// call - the call to dump arguments for
// lastCallOperand - the call's last operand (to determine the arc types)
// indentStack - the specification for the current level of indentation & arcs
//
// Return Value:
// None.
//
void Compiler::gtDispArgList(GenTreeCall* call, GenTree* lastCallOperand, IndentStack* indentStack)
{
unsigned argnum = 0;
if (call->gtCallThisArg != nullptr)
{
argnum++;
}
for (GenTreeCall::Use& use : call->Args())
{
GenTree* argNode = use.GetNode();
if (!argNode->IsNothingNode() && !argNode->IsArgPlaceHolderNode())
{
char buf[256];
gtGetArgMsg(call, argNode, argnum, buf, sizeof(buf));
gtDispChild(argNode, indentStack, (argNode == lastCallOperand) ? IIArcBottom : IIArc, buf, false);
}
argnum++;
}
}
//------------------------------------------------------------------------
// gtDispStmt: Print a statement to jitstdout.
//
// Arguments:
// stmt - the statement to be printed;
// msg - an additional message to print before the statement.
//
void Compiler::gtDispStmt(Statement* stmt, const char* msg /* = nullptr */)
{
if (opts.compDbgInfo)
{
if (msg != nullptr)
{
printf("%s ", msg);
}
printStmtID(stmt);
printf(" ( ");
const DebugInfo& di = stmt->GetDebugInfo();
// For statements in the root we display just the location without the
// inline context info.
if (di.GetInlineContext() == nullptr || di.GetInlineContext()->IsRoot())
{
di.GetLocation().Dump();
}
else
{
stmt->GetDebugInfo().Dump(false);
}
printf(" ... ");
IL_OFFSET lastILOffs = stmt->GetLastILOffset();
if (lastILOffs == BAD_IL_OFFSET)
{
printf("???");
}
else
{
printf("0x%03X", lastILOffs);
}
printf(" )");
DebugInfo par;
if (stmt->GetDebugInfo().GetParent(&par))
{
printf(" <- ");
par.Dump(true);
}
printf("\n");
}
gtDispTree(stmt->GetRootNode());
}
//------------------------------------------------------------------------
// gtDispBlockStmts: dumps all statements inside `block`.
//
// Arguments:
// block - the block to display statements for.
//
void Compiler::gtDispBlockStmts(BasicBlock* block)
{
for (Statement* const stmt : block->Statements())
{
gtDispStmt(stmt);
printf("\n");
}
}
//------------------------------------------------------------------------
// Compiler::gtDispRange: dumps a range of LIR.
//
// Arguments:
// range - the range of LIR to display.
//
void Compiler::gtDispRange(LIR::ReadOnlyRange const& range)
{
for (GenTree* node : range)
{
gtDispLIRNode(node);
}
}
//------------------------------------------------------------------------
// Compiler::gtDispTreeRange: dumps the LIR range that contains all of the
// nodes in the dataflow tree rooted at a given
// node.
//
// Arguments:
// containingRange - the LIR range that contains the root node.
// tree - the root of the dataflow tree.
//
void Compiler::gtDispTreeRange(LIR::Range& containingRange, GenTree* tree)
{
bool unused;
gtDispRange(containingRange.GetTreeRange(tree, &unused));
}
//------------------------------------------------------------------------
// Compiler::gtDispLIRNode: dumps a single LIR node.
//
// Arguments:
// node - the LIR node to dump.
// prefixMsg - an optional prefix for each line of output.
//
void Compiler::gtDispLIRNode(GenTree* node, const char* prefixMsg /* = nullptr */)
{
auto displayOperand = [](GenTree* operand, const char* message, IndentInfo operandArc, IndentStack& indentStack,
size_t prefixIndent) {
assert(operand != nullptr);
assert(message != nullptr);
if (prefixIndent != 0)
{
printf("%*s", (int)prefixIndent, "");
}
// 50 spaces for alignment
printf("%-50s", "");
#if FEATURE_SET_FLAGS
// additional flag enlarges the flag field by one character
printf(" ");
#endif
indentStack.Push(operandArc);
indentStack.print();
indentStack.Pop();
operandArc = IIArc;
printf(" t%-5d %-6s %s\n", operand->gtTreeID, varTypeName(operand->TypeGet()), message);
};
IndentStack indentStack(this);
size_t prefixIndent = 0;
if (prefixMsg != nullptr)
{
prefixIndent = strlen(prefixMsg);
}
const int bufLength = 256;
char buf[bufLength];
const bool nodeIsCall = node->IsCall();
// Visit operands
IndentInfo operandArc = IIArcTop;
for (GenTree* operand : node->Operands())
{
if (operand->IsArgPlaceHolderNode() || !operand->IsValue())
{
// Either of these situations may happen with calls.
continue;
}
if (nodeIsCall)
{
GenTreeCall* call = node->AsCall();
if ((call->gtCallThisArg != nullptr) && (operand == call->gtCallThisArg->GetNode()))
{
sprintf_s(buf, sizeof(buf), "this in %s", compRegVarName(REG_ARG_0));
displayOperand(operand, buf, operandArc, indentStack, prefixIndent);
}
else if (operand == call->gtCallAddr)
{
displayOperand(operand, "calli tgt", operandArc, indentStack, prefixIndent);
}
else if (operand == call->gtControlExpr)
{
displayOperand(operand, "control expr", operandArc, indentStack, prefixIndent);
}
else if (operand == call->gtCallCookie)
{
displayOperand(operand, "cookie", operandArc, indentStack, prefixIndent);
}
else
{
fgArgTabEntry* curArgTabEntry = gtArgEntryByNode(call, operand);
assert(curArgTabEntry);
if (!curArgTabEntry->isLateArg())
{
gtGetArgMsg(call, operand, curArgTabEntry->argNum, buf, sizeof(buf));
}
else
{
gtGetLateArgMsg(call, operand, curArgTabEntry->GetLateArgInx(), buf, sizeof(buf));
}
displayOperand(operand, buf, operandArc, indentStack, prefixIndent);
}
}
else if (node->OperIs(GT_STORE_DYN_BLK))
{
if (operand == node->AsBlk()->Addr())
{
displayOperand(operand, "lhs", operandArc, indentStack, prefixIndent);
}
else if (operand == node->AsBlk()->Data())
{
displayOperand(operand, "rhs", operandArc, indentStack, prefixIndent);
}
else
{
assert(operand == node->AsStoreDynBlk()->gtDynamicSize);
displayOperand(operand, "size", operandArc, indentStack, prefixIndent);
}
}
else if (node->OperIs(GT_ASG))
{
if (operand == node->gtGetOp1())
{
displayOperand(operand, "lhs", operandArc, indentStack, prefixIndent);
}
else
{
displayOperand(operand, "rhs", operandArc, indentStack, prefixIndent);
}
}
else
{
displayOperand(operand, "", operandArc, indentStack, prefixIndent);
}
operandArc = IIArc;
}
// Visit the operator
if (prefixMsg != nullptr)
{
printf("%s", prefixMsg);
}
const bool topOnly = true;
const bool isLIR = true;
gtDispTree(node, &indentStack, nullptr, topOnly, isLIR);
}
/*****************************************************************************/
#endif // DEBUG
/*****************************************************************************
*
* Check if the given node can be folded,
* and call the methods to perform the folding
*/
GenTree* Compiler::gtFoldExpr(GenTree* tree)
{
unsigned kind = tree->OperKind();
/* We must have a simple operation to fold */
// If we're in CSE, it's not safe to perform tree
    // folding given that it will potentially
    // change the set of considered CSE candidates.
if (optValnumCSE_phase)
{
return tree;
}
if (!(kind & GTK_SMPOP))
{
return tree;
}
GenTree* op1 = tree->AsOp()->gtOp1;
/* Filter out non-foldable trees that can have constant children */
assert(kind & (GTK_UNOP | GTK_BINOP));
switch (tree->gtOper)
{
case GT_RETFILT:
case GT_RETURN:
case GT_IND:
return tree;
default:
break;
}
/* try to fold the current node */
if ((kind & GTK_UNOP) && op1)
{
if (op1->OperIsConst())
{
return gtFoldExprConst(tree);
}
}
else if ((kind & GTK_BINOP) && op1 && tree->AsOp()->gtOp2 &&
// Don't take out conditionals for debugging
(opts.OptimizationEnabled() || !tree->OperIsCompare()))
{
GenTree* op2 = tree->AsOp()->gtOp2;
// The atomic operations are exempted here because they are never computable statically;
// one of their arguments is an address.
if (op1->OperIsConst() && op2->OperIsConst() && !tree->OperIsAtomicOp())
{
/* both nodes are constants - fold the expression */
return gtFoldExprConst(tree);
}
else if (op1->OperIsConst() || op2->OperIsConst())
{
/* at least one is a constant - see if we have a
* special operator that can use only one constant
* to fold - e.g. booleans */
return gtFoldExprSpecial(tree);
}
else if (tree->OperIsCompare())
{
/* comparisons of two local variables can sometimes be folded */
return gtFoldExprCompare(tree);
}
}
/* Return the original node (folded/bashed or not) */
return tree;
}
//------------------------------------------------------------------------
// gtFoldExprCall: see if a call is foldable
//
// Arguments:
// call - call to examine
//
// Returns:
// The original call if no folding happened.
// An alternative tree if folding happens.
//
// Notes:
// Checks for calls to Type.op_Equality, Type.op_Inequality, and
// Enum.HasFlag, and if the call is to one of these,
// attempts to optimize.
GenTree* Compiler::gtFoldExprCall(GenTreeCall* call)
{
// Can only fold calls to special intrinsics.
if ((call->gtCallMoreFlags & GTF_CALL_M_SPECIAL_INTRINSIC) == 0)
{
return call;
}
// Defer folding if not optimizing.
if (opts.OptimizationDisabled())
{
return call;
}
// Check for a new-style jit intrinsic.
const NamedIntrinsic ni = lookupNamedIntrinsic(call->gtCallMethHnd);
switch (ni)
{
case NI_System_Enum_HasFlag:
{
GenTree* thisOp = call->gtCallThisArg->GetNode();
GenTree* flagOp = call->gtCallArgs->GetNode();
GenTree* result = gtOptimizeEnumHasFlag(thisOp, flagOp);
if (result != nullptr)
{
return result;
}
break;
}
case NI_System_Type_op_Equality:
case NI_System_Type_op_Inequality:
{
noway_assert(call->TypeGet() == TYP_INT);
GenTree* op1 = call->gtCallArgs->GetNode();
GenTree* op2 = call->gtCallArgs->GetNext()->GetNode();
// If either operand is known to be a RuntimeType, this can be folded
GenTree* result = gtFoldTypeEqualityCall(ni == NI_System_Type_op_Equality, op1, op2);
if (result != nullptr)
{
return result;
}
break;
}
default:
break;
}
return call;
}
//------------------------------------------------------------------------
// gtFoldTypeEqualityCall: see if a (potential) type equality call is foldable
//
// Arguments:
// isEq -- is it == or != operator
// op1 -- first argument to call
// op2 -- second argument to call
//
// Returns:
//    nullptr if no folding happened.
// An alternative tree if folding happens.
//
// Notes:
//   If either operand is known to be a RuntimeType, then the type
// equality methods will simply check object identity and so we can
// fold the call into a simple compare of the call's operands.
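//
//   For example, when one argument is typeof(X), "Type.op_Equality(a, b)" can be
//   folded to a GT_EQ(a, b) reference compare of the two Type objects.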
GenTree* Compiler::gtFoldTypeEqualityCall(bool isEq, GenTree* op1, GenTree* op2)
{
if ((gtGetTypeProducerKind(op1) == TPK_Unknown) && (gtGetTypeProducerKind(op2) == TPK_Unknown))
{
return nullptr;
}
const genTreeOps simpleOp = isEq ? GT_EQ : GT_NE;
JITDUMP("\nFolding call to Type:op_%s to a simple compare via %s\n", isEq ? "Equality" : "Inequality",
GenTree::OpName(simpleOp));
GenTree* compare = gtNewOperNode(simpleOp, TYP_INT, op1, op2);
return compare;
}
/*****************************************************************************
*
* Some comparisons can be folded:
*
* locA == locA
* classVarA == classVarA
* locA + locB == locB + locA
*
*/
GenTree* Compiler::gtFoldExprCompare(GenTree* tree)
{
GenTree* op1 = tree->AsOp()->gtOp1;
GenTree* op2 = tree->AsOp()->gtOp2;
assert(tree->OperIsCompare());
/* Filter out cases that cannot be folded here */
    /* Do not fold floats or doubles (e.g. NaN != NaN) */
if (varTypeIsFloating(op1->TypeGet()))
{
return tree;
}
// Currently we can only fold when the two subtrees exactly match
// and everything is side effect free.
//
if (((tree->gtFlags & GTF_SIDE_EFFECT) != 0) || !GenTree::Compare(op1, op2, true))
{
// No folding.
//
return tree;
}
// GTF_ORDER_SIDEEFF here may indicate volatile subtrees.
// Or it may indicate a non-null assertion prop into an indir subtree.
//
// Check the operands.
//
if ((tree->gtFlags & GTF_ORDER_SIDEEFF) != 0)
{
        // If op1 is "volatile" and op2 is not, we can still fold.
//
const bool op1MayBeVolatile = (op1->gtFlags & GTF_ORDER_SIDEEFF) != 0;
const bool op2MayBeVolatile = (op2->gtFlags & GTF_ORDER_SIDEEFF) != 0;
if (!op1MayBeVolatile || op2MayBeVolatile)
{
// No folding.
//
return tree;
}
}
GenTree* cons;
switch (tree->gtOper)
{
case GT_EQ:
case GT_LE:
case GT_GE:
cons = gtNewIconNode(true); /* Folds to GT_CNS_INT(true) */
break;
case GT_NE:
case GT_LT:
case GT_GT:
cons = gtNewIconNode(false); /* Folds to GT_CNS_INT(false) */
break;
default:
assert(!"Unexpected relOp");
return tree;
}
    /* The node has been folded into 'cons' */
JITDUMP("\nFolding comparison with identical operands:\n");
DISPTREE(tree);
if (fgGlobalMorph)
{
fgMorphTreeDone(cons);
}
else
{
cons->gtNext = tree->gtNext;
cons->gtPrev = tree->gtPrev;
}
JITDUMP("Bashed to %s:\n", cons->AsIntConCommon()->IconValue() ? "true" : "false");
DISPTREE(cons);
return cons;
}
//------------------------------------------------------------------------
// gtCreateHandleCompare: generate a type handle comparison
//
// Arguments:
// oper -- comparison operation (equal/not equal)
// op1 -- first operand
// op2 -- second operand
// typeCheckInliningResult -- indicates how the comparison should happen
//
// Returns:
// Type comparison tree
//
GenTree* Compiler::gtCreateHandleCompare(genTreeOps oper,
GenTree* op1,
GenTree* op2,
CorInfoInlineTypeCheck typeCheckInliningResult)
{
// If we can compare pointers directly, just emit the binary operation
if (typeCheckInliningResult == CORINFO_INLINE_TYPECHECK_PASS)
{
return gtNewOperNode(oper, TYP_INT, op1, op2);
}
assert(typeCheckInliningResult == CORINFO_INLINE_TYPECHECK_USE_HELPER);
// Emit a call to a runtime helper
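    // The helper returns a nonzero value when the types are equivalent, so compare
    // its result against zero below to produce the requested GT_EQ/GT_NE polarity.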
GenTreeCall::Use* helperArgs = gtNewCallArgs(op1, op2);
GenTree* ret = gtNewHelperCallNode(CORINFO_HELP_ARE_TYPES_EQUIVALENT, TYP_INT, helperArgs);
if (oper == GT_EQ)
{
ret = gtNewOperNode(GT_NE, TYP_INT, ret, gtNewIconNode(0, TYP_INT));
}
else
{
assert(oper == GT_NE);
ret = gtNewOperNode(GT_EQ, TYP_INT, ret, gtNewIconNode(0, TYP_INT));
}
return ret;
}
//------------------------------------------------------------------------
// gtFoldTypeCompare: see if a type comparison can be further simplified
//
// Arguments:
// tree -- tree possibly comparing types
//
// Returns:
// An alternative tree if folding happens.
// Original tree otherwise.
//
// Notes:
// Checks for
// typeof(...) == obj.GetType()
// typeof(...) == typeof(...)
// obj1.GetType() == obj2.GetType()
//
// And potentially optimizes away the need to obtain actual
// RuntimeType objects to do the comparison.
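//
//   For example, "obj.GetType() == typeof(X)" can often be reduced to comparing
//   obj's method table pointer against X's method table handle (see below).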
GenTree* Compiler::gtFoldTypeCompare(GenTree* tree)
{
// Only handle EQ and NE
// (maybe relop vs null someday)
const genTreeOps oper = tree->OperGet();
if ((oper != GT_EQ) && (oper != GT_NE))
{
return tree;
}
// Screen for the right kinds of operands
GenTree* const op1 = tree->AsOp()->gtOp1;
const TypeProducerKind op1Kind = gtGetTypeProducerKind(op1);
if (op1Kind == TPK_Unknown)
{
return tree;
}
GenTree* const op2 = tree->AsOp()->gtOp2;
const TypeProducerKind op2Kind = gtGetTypeProducerKind(op2);
if (op2Kind == TPK_Unknown)
{
return tree;
}
// If both types are created via handles, we can simply compare
// handles instead of the types that they'd create.
if ((op1Kind == TPK_Handle) && (op2Kind == TPK_Handle))
{
JITDUMP("Optimizing compare of types-from-handles to instead compare handles\n");
GenTree* op1ClassFromHandle = tree->AsOp()->gtOp1->AsCall()->gtCallArgs->GetNode();
GenTree* op2ClassFromHandle = tree->AsOp()->gtOp2->AsCall()->gtCallArgs->GetNode();
CORINFO_CLASS_HANDLE cls1Hnd = NO_CLASS_HANDLE;
CORINFO_CLASS_HANDLE cls2Hnd = NO_CLASS_HANDLE;
// Try and find class handles from op1 and op2
cls1Hnd = gtGetHelperArgClassHandle(op1ClassFromHandle);
cls2Hnd = gtGetHelperArgClassHandle(op2ClassFromHandle);
// If we have both class handles, try and resolve the type equality test completely.
bool resolveFailed = false;
if ((cls1Hnd != NO_CLASS_HANDLE) && (cls2Hnd != NO_CLASS_HANDLE))
{
JITDUMP("Asking runtime to compare %p (%s) and %p (%s) for equality\n", dspPtr(cls1Hnd),
info.compCompHnd->getClassName(cls1Hnd), dspPtr(cls2Hnd), info.compCompHnd->getClassName(cls2Hnd));
TypeCompareState s = info.compCompHnd->compareTypesForEquality(cls1Hnd, cls2Hnd);
if (s != TypeCompareState::May)
{
// Type comparison result is known.
const bool typesAreEqual = (s == TypeCompareState::Must);
const bool operatorIsEQ = (oper == GT_EQ);
const int compareResult = operatorIsEQ ^ typesAreEqual ? 0 : 1;
JITDUMP("Runtime reports comparison is known at jit time: %u\n", compareResult);
GenTree* result = gtNewIconNode(compareResult);
return result;
}
else
{
resolveFailed = true;
}
}
if (resolveFailed)
{
JITDUMP("Runtime reports comparison is NOT known at jit time\n");
}
else
{
JITDUMP("Could not find handle for %s%s\n", (cls1Hnd == NO_CLASS_HANDLE) ? " cls1" : "",
(cls2Hnd == NO_CLASS_HANDLE) ? " cls2" : "");
}
// We can't answer the equality comparison definitively at jit
// time, but can still simplify the comparison.
//
// Find out how we can compare the two handles.
// NOTE: We're potentially passing NO_CLASS_HANDLE, but the runtime knows what to do with it here.
CorInfoInlineTypeCheck inliningKind =
info.compCompHnd->canInlineTypeCheck(cls1Hnd, CORINFO_INLINE_TYPECHECK_SOURCE_TOKEN);
// If the first type needs helper, check the other type: it might be okay with a simple compare.
if (inliningKind == CORINFO_INLINE_TYPECHECK_USE_HELPER)
{
inliningKind = info.compCompHnd->canInlineTypeCheck(cls2Hnd, CORINFO_INLINE_TYPECHECK_SOURCE_TOKEN);
}
assert(inliningKind == CORINFO_INLINE_TYPECHECK_PASS || inliningKind == CORINFO_INLINE_TYPECHECK_USE_HELPER);
GenTree* compare = gtCreateHandleCompare(oper, op1ClassFromHandle, op2ClassFromHandle, inliningKind);
        // Drop any now-irrelevant flags
compare->gtFlags |= tree->gtFlags & (GTF_RELOP_JMP_USED | GTF_DONT_CSE);
return compare;
}
if ((op1Kind == TPK_GetType) && (op2Kind == TPK_GetType))
{
GenTree* arg1;
if (op1->OperGet() == GT_INTRINSIC)
{
arg1 = op1->AsUnOp()->gtOp1;
}
else
{
arg1 = op1->AsCall()->gtCallThisArg->GetNode();
}
arg1 = gtNewMethodTableLookup(arg1);
GenTree* arg2;
if (op2->OperGet() == GT_INTRINSIC)
{
arg2 = op2->AsUnOp()->gtOp1;
}
else
{
arg2 = op2->AsCall()->gtCallThisArg->GetNode();
}
arg2 = gtNewMethodTableLookup(arg2);
CorInfoInlineTypeCheck inliningKind =
info.compCompHnd->canInlineTypeCheck(nullptr, CORINFO_INLINE_TYPECHECK_SOURCE_VTABLE);
assert(inliningKind == CORINFO_INLINE_TYPECHECK_PASS || inliningKind == CORINFO_INLINE_TYPECHECK_USE_HELPER);
GenTree* compare = gtCreateHandleCompare(oper, arg1, arg2, inliningKind);
        // Drop any now-irrelevant flags
compare->gtFlags |= tree->gtFlags & (GTF_RELOP_JMP_USED | GTF_DONT_CSE);
return compare;
}
// If one operand creates a type from a handle and the other operand is fetching the type from an object,
// we can sometimes optimize the type compare into a simpler
// method table comparison.
//
// TODO: if other operand is null...
if (!(((op1Kind == TPK_GetType) && (op2Kind == TPK_Handle)) ||
((op1Kind == TPK_Handle) && (op2Kind == TPK_GetType))))
{
return tree;
}
GenTree* const opHandle = (op1Kind == TPK_Handle) ? op1 : op2;
GenTree* const opOther = (op1Kind == TPK_Handle) ? op2 : op1;
// Tunnel through the handle operand to get at the class handle involved.
GenTree* const opHandleArgument = opHandle->AsCall()->gtCallArgs->GetNode();
CORINFO_CLASS_HANDLE clsHnd = gtGetHelperArgClassHandle(opHandleArgument);
// If we couldn't find the class handle, give up.
if (clsHnd == NO_CLASS_HANDLE)
{
return tree;
}
// Ask the VM if this type can be equality tested by a simple method
// table comparison.
CorInfoInlineTypeCheck typeCheckInliningResult =
info.compCompHnd->canInlineTypeCheck(clsHnd, CORINFO_INLINE_TYPECHECK_SOURCE_VTABLE);
if (typeCheckInliningResult == CORINFO_INLINE_TYPECHECK_NONE)
{
return tree;
}
// We're good to go.
JITDUMP("Optimizing compare of obj.GetType()"
" and type-from-handle to compare method table pointer\n");
// opHandleArgument is the method table we're looking for.
GenTree* const knownMT = opHandleArgument;
// Fetch object method table from the object itself.
GenTree* objOp = nullptr;
// Note we may see intrinsified or regular calls to GetType
if (opOther->OperGet() == GT_INTRINSIC)
{
objOp = opOther->AsUnOp()->gtOp1;
}
else
{
objOp = opOther->AsCall()->gtCallThisArg->GetNode();
}
bool pIsExact = false;
bool pIsNonNull = false;
CORINFO_CLASS_HANDLE objCls = gtGetClassHandle(objOp, &pIsExact, &pIsNonNull);
// if both classes are "final" (e.g. System.String[]) we can replace the comparison
// with `true/false` + null check.
if ((objCls != NO_CLASS_HANDLE) && (pIsExact || impIsClassExact(objCls)))
{
TypeCompareState tcs = info.compCompHnd->compareTypesForEquality(objCls, clsHnd);
if (tcs != TypeCompareState::May)
{
const bool operatorIsEQ = oper == GT_EQ;
const bool typesAreEqual = tcs == TypeCompareState::Must;
GenTree* compareResult = gtNewIconNode((operatorIsEQ ^ typesAreEqual) ? 0 : 1);
if (!pIsNonNull)
{
// we still have to emit a null-check
// obj.GetType == typeof() -> (nullcheck) true/false
GenTree* nullcheck = gtNewNullCheck(objOp, compCurBB);
return gtNewOperNode(GT_COMMA, tree->TypeGet(), nullcheck, compareResult);
}
else if (objOp->gtFlags & GTF_ALL_EFFECT)
{
return gtNewOperNode(GT_COMMA, tree->TypeGet(), objOp, compareResult);
}
else
{
return compareResult;
}
}
}
// Fetch the method table from the object
GenTree* const objMT = gtNewMethodTableLookup(objOp);
// Compare the two method tables
GenTree* const compare = gtCreateHandleCompare(oper, objMT, knownMT, typeCheckInliningResult);
// Drop any now irrelevant flags
compare->gtFlags |= tree->gtFlags & (GTF_RELOP_JMP_USED | GTF_DONT_CSE);
// And we're done
return compare;
}
//------------------------------------------------------------------------
// gtGetHelperArgClassHandle: find the compile time class handle from
// a helper call argument tree
//
// Arguments:
// tree - tree that passes the handle to the helper
//
// Returns:
// The compile time class handle if known.
//
CORINFO_CLASS_HANDLE Compiler::gtGetHelperArgClassHandle(GenTree* tree)
{
CORINFO_CLASS_HANDLE result = NO_CLASS_HANDLE;
// Walk through any wrapping nop.
if ((tree->gtOper == GT_NOP) && (tree->gtType == TYP_I_IMPL))
{
tree = tree->AsOp()->gtOp1;
}
// The handle could be a literal constant
if ((tree->OperGet() == GT_CNS_INT) && (tree->TypeGet() == TYP_I_IMPL))
{
assert(tree->IsIconHandle(GTF_ICON_CLASS_HDL));
result = (CORINFO_CLASS_HANDLE)tree->AsIntCon()->gtCompileTimeHandle;
}
// Or the result of a runtime lookup
else if (tree->OperGet() == GT_RUNTIMELOOKUP)
{
result = tree->AsRuntimeLookup()->GetClassHandle();
}
// Or something reached indirectly
else if (tree->gtOper == GT_IND)
{
// The handle indirs we are looking for will be marked as non-faulting.
// Certain others (eg from refanytype) may not be.
if (tree->gtFlags & GTF_IND_NONFAULTING)
{
GenTree* handleTreeInternal = tree->AsOp()->gtOp1;
if ((handleTreeInternal->OperGet() == GT_CNS_INT) && (handleTreeInternal->TypeGet() == TYP_I_IMPL))
{
// These handle constants should be class handles.
assert(handleTreeInternal->IsIconHandle(GTF_ICON_CLASS_HDL));
result = (CORINFO_CLASS_HANDLE)handleTreeInternal->AsIntCon()->gtCompileTimeHandle;
}
}
}
return result;
}
//------------------------------------------------------------------------
// gtFoldExprSpecial -- optimize binary ops with one constant operand
//
// Arguments:
// tree - tree to optimize
//
// Return value:
// Tree (possibly modified at root or below), or a new tree
// Any new tree is fully morphed, if necessary.
//
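// Notes:
//   Representative folds handled below include "x + 0 -> x", "x * 1 -> x",
//   "x * 0 -> 0" (when 'x' has no side effects), and unsigned "x >= 0 -> 1".
//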
GenTree* Compiler::gtFoldExprSpecial(GenTree* tree)
{
GenTree* op1 = tree->AsOp()->gtOp1;
GenTree* op2 = tree->AsOp()->gtOp2;
genTreeOps oper = tree->OperGet();
GenTree* op;
GenTree* cons;
ssize_t val;
assert(tree->OperKind() & GTK_BINOP);
/* Filter out operators that cannot be folded here */
if (oper == GT_CAST)
{
return tree;
}
/* We only consider TYP_INT for folding
* Do not fold pointer arithmetic (e.g. addressing modes!) */
if (oper != GT_QMARK && !varTypeIsIntOrI(tree->gtType))
{
return tree;
}
/* Find out which is the constant node */
if (op1->IsCnsIntOrI())
{
op = op2;
cons = op1;
}
else if (op2->IsCnsIntOrI())
{
op = op1;
cons = op2;
}
else
{
return tree;
}
/* Get the constant value */
val = cons->AsIntConCommon()->IconValue();
// Transforms that would drop op cannot be performed if op has side effects
bool opHasSideEffects = (op->gtFlags & GTF_SIDE_EFFECT) != 0;
// Helper function that creates a new IntCon node and morphs it, if required
auto NewMorphedIntConNode = [&](int value) -> GenTreeIntCon* {
GenTreeIntCon* icon = gtNewIconNode(value);
if (fgGlobalMorph)
{
fgMorphTreeDone(icon);
}
return icon;
};
// Here `op` is the non-constant operand, `cons` is the constant operand
// and `val` is the constant value.
switch (oper)
{
case GT_LE:
if (tree->IsUnsigned() && (val == 0) && (op1 == cons) && !opHasSideEffects)
{
// unsigned (0 <= x) is always true
op = NewMorphedIntConNode(1);
goto DONE_FOLD;
}
break;
case GT_GE:
if (tree->IsUnsigned() && (val == 0) && (op2 == cons) && !opHasSideEffects)
{
// unsigned (x >= 0) is always true
op = NewMorphedIntConNode(1);
goto DONE_FOLD;
}
break;
case GT_LT:
if (tree->IsUnsigned() && (val == 0) && (op2 == cons) && !opHasSideEffects)
{
// unsigned (x < 0) is always false
op = NewMorphedIntConNode(0);
goto DONE_FOLD;
}
break;
case GT_GT:
if (tree->IsUnsigned() && (val == 0) && (op1 == cons) && !opHasSideEffects)
{
// unsigned (0 > x) is always false
op = NewMorphedIntConNode(0);
goto DONE_FOLD;
}
FALLTHROUGH;
case GT_EQ:
case GT_NE:
// Optimize boxed value classes; these are always false. This IL is
// generated when a generic value is tested against null:
// <T> ... foo(T x) { ... if ((object)x == null) ...
if ((val == 0) && op->IsBoxedValue())
{
JITDUMP("\nAttempting to optimize BOX(valueType) %s null [%06u]\n", GenTree::OpName(oper),
dspTreeID(tree));
// We don't expect GT_GT with signed compares, and we
// can't predict the result if we do see it, since the
// boxed object addr could have its high bit set.
if ((oper == GT_GT) && !tree->IsUnsigned())
{
JITDUMP(" bailing; unexpected signed compare via GT_GT\n");
}
else
{
// The tree under the box must be side effect free
// since we will drop it if we optimize.
assert(!gtTreeHasSideEffects(op->AsBox()->BoxOp(), GTF_SIDE_EFFECT));
// See if we can optimize away the box and related statements.
GenTree* boxSourceTree = gtTryRemoveBoxUpstreamEffects(op);
bool didOptimize = (boxSourceTree != nullptr);
// If optimization succeeded, remove the box.
if (didOptimize)
{
// Set up the result of the compare.
int compareResult = 0;
if (oper == GT_GT)
{
// GT_GT(null, box) == false
// GT_GT(box, null) == true
compareResult = (op1 == op);
}
else if (oper == GT_EQ)
{
// GT_EQ(box, null) == false
// GT_EQ(null, box) == false
compareResult = 0;
}
else
{
assert(oper == GT_NE);
// GT_NE(box, null) == true
// GT_NE(null, box) == true
compareResult = 1;
}
JITDUMP("\nSuccess: replacing BOX(valueType) %s null with %d\n", GenTree::OpName(oper),
compareResult);
return NewMorphedIntConNode(compareResult);
}
}
}
else
{
return gtFoldBoxNullable(tree);
}
break;
case GT_ADD:
if (val == 0)
{
goto DONE_FOLD;
}
break;
case GT_MUL:
if (val == 1)
{
goto DONE_FOLD;
}
else if (val == 0)
{
/* Multiply by zero - return the 'zero' node, but not if side effects */
if (!opHasSideEffects)
{
op = cons;
goto DONE_FOLD;
}
}
break;
case GT_DIV:
case GT_UDIV:
if ((op2 == cons) && (val == 1) && !op1->OperIsConst())
{
goto DONE_FOLD;
}
break;
case GT_SUB:
if ((op2 == cons) && (val == 0) && !op1->OperIsConst())
{
goto DONE_FOLD;
}
break;
case GT_AND:
if (val == 0)
{
/* AND with zero - return the 'zero' node, but not if side effects */
if (!opHasSideEffects)
{
op = cons;
goto DONE_FOLD;
}
}
else
{
/* The GTF_BOOLEAN flag is set for nodes that are part
* of a boolean expression, thus all their children
* are known to evaluate to only 0 or 1 */
if (tree->gtFlags & GTF_BOOLEAN)
{
/* The constant value must be 1
* AND with 1 stays the same */
assert(val == 1);
goto DONE_FOLD;
}
}
break;
case GT_OR:
if (val == 0)
{
goto DONE_FOLD;
}
else if (tree->gtFlags & GTF_BOOLEAN)
{
/* The constant value must be 1 - OR with 1 is 1 */
assert(val == 1);
/* OR with one - return the 'one' node, but not if side effects */
if (!opHasSideEffects)
{
op = cons;
goto DONE_FOLD;
}
}
break;
case GT_LSH:
case GT_RSH:
case GT_RSZ:
case GT_ROL:
case GT_ROR:
if (val == 0)
{
if (op2 == cons)
{
goto DONE_FOLD;
}
else if (!opHasSideEffects)
{
op = cons;
goto DONE_FOLD;
}
}
break;
case GT_QMARK:
{
assert(op1 == cons && op2 == op && op2->gtOper == GT_COLON);
assert(op2->AsOp()->gtOp1 && op2->AsOp()->gtOp2);
assert(val == 0 || val == 1);
if (val)
{
op = op2->AsColon()->ThenNode();
}
else
{
op = op2->AsColon()->ElseNode();
}
            // Clear colon flags only if the qmark itself is not conditionally executed
if ((tree->gtFlags & GTF_COLON_COND) == 0)
{
fgWalkTreePre(&op, gtClearColonCond);
}
}
goto DONE_FOLD;
default:
break;
}
/* The node is not foldable */
return tree;
DONE_FOLD:
    /* The node has been folded into 'op' */
    // If there was an assignment update, we just morphed it into
    // a use; update the flags appropriately.
if (op->gtOper == GT_LCL_VAR)
{
assert(tree->OperIs(GT_ASG) || (op->gtFlags & (GTF_VAR_USEASG | GTF_VAR_DEF)) == 0);
op->gtFlags &= ~(GTF_VAR_USEASG | GTF_VAR_DEF);
}
JITDUMP("\nFolding binary operator with a constant operand:\n");
DISPTREE(tree);
JITDUMP("Transformed into:\n");
DISPTREE(op);
return op;
}
//------------------------------------------------------------------------
// gtFoldBoxNullable -- optimize a boxed nullable feeding a compare to zero
//
// Arguments:
// tree - binop tree to potentially optimize, must be
// GT_GT, GT_EQ, or GT_NE
//
// Return value:
// Tree (possibly modified below the root).
//
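// Notes:
//   For example, "BOX_NULLABLE(&x) != null" is rewritten below to test the
//   nullable's 'hasValue' field directly (a sketch of the transformation).
//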
GenTree* Compiler::gtFoldBoxNullable(GenTree* tree)
{
assert(tree->OperKind() & GTK_BINOP);
assert(tree->OperIs(GT_GT, GT_EQ, GT_NE));
genTreeOps const oper = tree->OperGet();
if ((oper == GT_GT) && !tree->IsUnsigned())
{
return tree;
}
GenTree* const op1 = tree->AsOp()->gtOp1;
GenTree* const op2 = tree->AsOp()->gtOp2;
GenTree* op;
GenTree* cons;
if (op1->IsCnsIntOrI())
{
op = op2;
cons = op1;
}
else if (op2->IsCnsIntOrI())
{
op = op1;
cons = op2;
}
else
{
return tree;
}
ssize_t const val = cons->AsIntConCommon()->IconValue();
if (val != 0)
{
return tree;
}
if (!op->IsCall())
{
return tree;
}
GenTreeCall* const call = op->AsCall();
if (!call->IsHelperCall(this, CORINFO_HELP_BOX_NULLABLE))
{
return tree;
}
JITDUMP("\nAttempting to optimize BOX_NULLABLE(&x) %s null [%06u]\n", GenTree::OpName(oper), dspTreeID(tree));
// Get the address of the struct being boxed
GenTree* const arg = call->gtCallArgs->GetNext()->GetNode();
if (arg->OperIs(GT_ADDR) && ((arg->gtFlags & GTF_LATE_ARG) == 0))
{
CORINFO_CLASS_HANDLE nullableHnd = gtGetStructHandle(arg->AsOp()->gtOp1);
CORINFO_FIELD_HANDLE fieldHnd = info.compCompHnd->getFieldInClass(nullableHnd, 0);
// Replace the box with an access of the nullable 'hasValue' field.
JITDUMP("\nSuccess: replacing BOX_NULLABLE(&x) [%06u] with x.hasValue\n", dspTreeID(op));
GenTree* newOp = gtNewFieldRef(TYP_BOOL, fieldHnd, arg, 0);
if (op == op1)
{
tree->AsOp()->gtOp1 = newOp;
}
else
{
tree->AsOp()->gtOp2 = newOp;
}
cons->gtType = TYP_INT;
}
return tree;
}
//------------------------------------------------------------------------
// gtTryRemoveBoxUpstreamEffects: given an unused value type box,
// try and remove the upstream allocation and unnecessary parts of
// the copy.
//
// Arguments:
// op - the box node to optimize
// options - controls whether and how trees are modified
// (see notes)
//
// Return Value:
// A tree representing the original value to box, if removal
// is successful/possible (but see note). nullptr if removal fails.
//
// Notes:
// Value typed box gets special treatment because it has associated
// side effects that can be removed if the box result is not used.
//
// By default (options == BR_REMOVE_AND_NARROW) this method will
//    try and remove unnecessary trees and will try and reduce remaining
// operations to the minimal set, possibly narrowing the width of
// loads from the box source if it is a struct.
//
// To perform a trial removal, pass BR_DONT_REMOVE. This can be
// useful to determine if this optimization should only be
// performed if some other conditions hold true.
//
// To remove but not alter the access to the box source, pass
// BR_REMOVE_BUT_NOT_NARROW.
//
// To remove and return the tree for the type handle used for
// the boxed newobj, pass BR_REMOVE_BUT_NOT_NARROW_WANT_TYPE_HANDLE.
// This can be useful when the only part of the box that is "live"
// is its type.
//
//    If removal fails, it is possible that a subsequent pass may be
// able to optimize. Blocking side effects may now be minimized
// (null or bounds checks might have been removed) or might be
// better known (inline return placeholder updated with the actual
// return expression). So the box is perhaps best left as is to
// help trigger this re-examination.
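//
//    For example, when an inlined box expands to a newobj assignment plus a copy
//    and the boxed result turns out to be unused, both statements can be bashed
//    to NOPs, keeping at most a minimal read of the source value for its side
//    effects (an illustrative sketch of the common case).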
GenTree* Compiler::gtTryRemoveBoxUpstreamEffects(GenTree* op, BoxRemovalOptions options)
{
assert(op->IsBoxedValue());
// grab related parts for the optimization
GenTreeBox* box = op->AsBox();
Statement* asgStmt = box->gtAsgStmtWhenInlinedBoxValue;
Statement* copyStmt = box->gtCopyStmtWhenInlinedBoxValue;
JITDUMP("gtTryRemoveBoxUpstreamEffects: %s to %s of BOX (valuetype)"
" [%06u] (assign/newobj " FMT_STMT " copy " FMT_STMT "\n",
(options == BR_DONT_REMOVE) ? "checking if it is possible" : "attempting",
(options == BR_MAKE_LOCAL_COPY) ? "make local unboxed version" : "remove side effects", dspTreeID(op),
asgStmt->GetID(), copyStmt->GetID());
// If we don't recognize the form of the assign, bail.
GenTree* asg = asgStmt->GetRootNode();
if (asg->gtOper != GT_ASG)
{
JITDUMP(" bailing; unexpected assignment op %s\n", GenTree::OpName(asg->gtOper));
return nullptr;
}
// If we're eventually going to return the type handle, remember it now.
GenTree* boxTypeHandle = nullptr;
if ((options == BR_REMOVE_AND_NARROW_WANT_TYPE_HANDLE) || (options == BR_DONT_REMOVE_WANT_TYPE_HANDLE))
{
GenTree* asgSrc = asg->AsOp()->gtOp2;
genTreeOps asgSrcOper = asgSrc->OperGet();
// Allocation may be via AllocObj or via helper call, depending
// on when this is invoked and whether the jit is using AllocObj
// for R2R allocations.
if (asgSrcOper == GT_ALLOCOBJ)
{
GenTreeAllocObj* allocObj = asgSrc->AsAllocObj();
boxTypeHandle = allocObj->AsOp()->gtOp1;
}
else if (asgSrcOper == GT_CALL)
{
GenTreeCall* newobjCall = asgSrc->AsCall();
GenTreeCall::Use* newobjArgs = newobjCall->gtCallArgs;
// In R2R expansions the handle may not be an explicit operand to the helper,
// so we can't remove the box.
if (newobjArgs == nullptr)
{
assert(newobjCall->IsHelperCall(this, CORINFO_HELP_READYTORUN_NEW));
JITDUMP(" bailing; newobj via R2R helper\n");
return nullptr;
}
boxTypeHandle = newobjArgs->GetNode();
}
else
{
unreached();
}
assert(boxTypeHandle != nullptr);
}
// If we don't recognize the form of the copy, bail.
GenTree* copy = copyStmt->GetRootNode();
if (copy->gtOper != GT_ASG)
{
// GT_RET_EXPR is a tolerable temporary failure.
// The jit will revisit this optimization after
// inlining is done.
if (copy->gtOper == GT_RET_EXPR)
{
JITDUMP(" bailing; must wait for replacement of copy %s\n", GenTree::OpName(copy->gtOper));
}
else
{
// Anything else is a missed case we should
// figure out how to handle. One known case
// is GT_COMMAs enclosing the GT_ASG we are
// looking for.
JITDUMP(" bailing; unexpected copy op %s\n", GenTree::OpName(copy->gtOper));
}
return nullptr;
}
// Handle case where we are optimizing the box into a local copy
if (options == BR_MAKE_LOCAL_COPY)
{
// Drill into the box to get at the box temp local and the box type
GenTree* boxTemp = box->BoxOp();
assert(boxTemp->IsLocal());
const unsigned boxTempLcl = boxTemp->AsLclVar()->GetLclNum();
assert(lvaTable[boxTempLcl].lvType == TYP_REF);
CORINFO_CLASS_HANDLE boxClass = lvaTable[boxTempLcl].lvClassHnd;
assert(boxClass != nullptr);
// Verify that the copyDst has the expected shape
// (blk|obj|ind (add (boxTempLcl, ptr-size)))
//
// The shape here is constrained to the patterns we produce
// over in impImportAndPushBox for the inlined box case.
GenTree* copyDst = copy->AsOp()->gtOp1;
if (!copyDst->OperIs(GT_BLK, GT_IND, GT_OBJ))
{
JITDUMP("Unexpected copy dest operator %s\n", GenTree::OpName(copyDst->gtOper));
return nullptr;
}
GenTree* copyDstAddr = copyDst->AsOp()->gtOp1;
if (copyDstAddr->OperGet() != GT_ADD)
{
JITDUMP("Unexpected copy dest address tree\n");
return nullptr;
}
GenTree* copyDstAddrOp1 = copyDstAddr->AsOp()->gtOp1;
if ((copyDstAddrOp1->OperGet() != GT_LCL_VAR) || (copyDstAddrOp1->AsLclVarCommon()->GetLclNum() != boxTempLcl))
{
JITDUMP("Unexpected copy dest address 1st addend\n");
return nullptr;
}
GenTree* copyDstAddrOp2 = copyDstAddr->AsOp()->gtOp2;
if (!copyDstAddrOp2->IsIntegralConst(TARGET_POINTER_SIZE))
{
JITDUMP("Unexpected copy dest address 2nd addend\n");
return nullptr;
}
// Screening checks have all passed. Do the transformation.
//
// Retype the box temp to be a struct
JITDUMP("Retyping box temp V%02u to struct %s\n", boxTempLcl, eeGetClassName(boxClass));
lvaTable[boxTempLcl].lvType = TYP_UNDEF;
const bool isUnsafeValueClass = false;
lvaSetStruct(boxTempLcl, boxClass, isUnsafeValueClass);
var_types boxTempType = lvaTable[boxTempLcl].lvType;
        // Remove the newobj and assignment to box temp
JITDUMP("Bashing NEWOBJ [%06u] to NOP\n", dspTreeID(asg));
asg->gtBashToNOP();
// Update the copy from the value to be boxed to the box temp
GenTree* newDst = gtNewOperNode(GT_ADDR, TYP_BYREF, gtNewLclvNode(boxTempLcl, boxTempType));
copyDst->AsOp()->gtOp1 = newDst;
// Return the address of the now-struct typed box temp
GenTree* retValue = gtNewOperNode(GT_ADDR, TYP_BYREF, gtNewLclvNode(boxTempLcl, boxTempType));
return retValue;
}
// If the copy is a struct copy, make sure we know how to isolate
// any source side effects.
GenTree* copySrc = copy->AsOp()->gtOp2;
// If the copy source is from a pending inline, wait for it to resolve.
if (copySrc->gtOper == GT_RET_EXPR)
{
JITDUMP(" bailing; must wait for replacement of copy source %s\n", GenTree::OpName(copySrc->gtOper));
return nullptr;
}
bool hasSrcSideEffect = false;
bool isStructCopy = false;
if (gtTreeHasSideEffects(copySrc, GTF_SIDE_EFFECT))
{
hasSrcSideEffect = true;
if (varTypeIsStruct(copySrc->gtType))
{
isStructCopy = true;
if ((copySrc->gtOper != GT_OBJ) && (copySrc->gtOper != GT_IND) && (copySrc->gtOper != GT_FIELD))
{
// We don't know how to handle other cases, yet.
JITDUMP(" bailing; unexpected copy source struct op with side effect %s\n",
GenTree::OpName(copySrc->gtOper));
return nullptr;
}
}
}
// If this was a trial removal, we're done.
if (options == BR_DONT_REMOVE)
{
return copySrc;
}
if (options == BR_DONT_REMOVE_WANT_TYPE_HANDLE)
{
return boxTypeHandle;
}
// Otherwise, proceed with the optimization.
//
// Change the assignment expression to a NOP.
JITDUMP("\nBashing NEWOBJ [%06u] to NOP\n", dspTreeID(asg));
asg->gtBashToNOP();
// Change the copy expression so it preserves key
// source side effects.
JITDUMP("\nBashing COPY [%06u]", dspTreeID(copy));
if (!hasSrcSideEffect)
{
// If there were no copy source side effects just bash
// the copy to a NOP.
copy->gtBashToNOP();
JITDUMP(" to NOP; no source side effects.\n");
}
else if (!isStructCopy)
{
// For scalar types, go ahead and produce the
// value as the copy is fairly cheap and likely
// the optimizer can trim things down to just the
// minimal side effect parts.
copyStmt->SetRootNode(copySrc);
JITDUMP(" to scalar read via [%06u]\n", dspTreeID(copySrc));
}
else
{
// For struct types read the first byte of the
// source struct; there's no need to read the
// entire thing, and no place to put it.
assert(copySrc->OperIs(GT_OBJ, GT_IND, GT_FIELD));
copyStmt->SetRootNode(copySrc);
if (options == BR_REMOVE_AND_NARROW || options == BR_REMOVE_AND_NARROW_WANT_TYPE_HANDLE)
{
JITDUMP(" to read first byte of struct via modified [%06u]\n", dspTreeID(copySrc));
gtChangeOperToNullCheck(copySrc, compCurBB);
}
else
{
JITDUMP(" to read entire struct via modified [%06u]\n", dspTreeID(copySrc));
}
}
if (fgStmtListThreaded)
{
fgSetStmtSeq(asgStmt);
fgSetStmtSeq(copyStmt);
}
// Box effects were successfully optimized.
if (options == BR_REMOVE_AND_NARROW_WANT_TYPE_HANDLE)
{
return boxTypeHandle;
}
else
{
return copySrc;
}
}
//------------------------------------------------------------------------
// gtOptimizeEnumHasFlag: given the operands for a call to Enum.HasFlag,
// try and optimize the call to a simple and/compare tree.
//
// Arguments:
// thisOp - first argument to the call
// flagOp - second argument to the call
//
// Return Value:
//    A new cmp/and tree if successful. nullptr on failure.
//
// Notes:
// If successful, may allocate new temps and modify connected
// statements.
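//
//    For example, "e.HasFlag(f)" over matching (non-shared) enum types becomes
//    "(e & f) == f" computed on the un-boxed values (illustrative shape only).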
GenTree* Compiler::gtOptimizeEnumHasFlag(GenTree* thisOp, GenTree* flagOp)
{
JITDUMP("Considering optimizing call to Enum.HasFlag....\n");
// Operands must be boxes
if (!thisOp->IsBoxedValue() || !flagOp->IsBoxedValue())
{
JITDUMP("bailing, need both inputs to be BOXes\n");
return nullptr;
}
// Operands must have same type
bool isExactThis = false;
bool isNonNullThis = false;
CORINFO_CLASS_HANDLE thisHnd = gtGetClassHandle(thisOp, &isExactThis, &isNonNullThis);
if (thisHnd == nullptr)
{
JITDUMP("bailing, can't find type for 'this' operand\n");
return nullptr;
}
// A boxed thisOp should have exact type and non-null instance
assert(isExactThis);
assert(isNonNullThis);
bool isExactFlag = false;
bool isNonNullFlag = false;
CORINFO_CLASS_HANDLE flagHnd = gtGetClassHandle(flagOp, &isExactFlag, &isNonNullFlag);
if (flagHnd == nullptr)
{
JITDUMP("bailing, can't find type for 'flag' operand\n");
return nullptr;
}
// A boxed flagOp should have exact type and non-null instance
assert(isExactFlag);
assert(isNonNullFlag);
if (flagHnd != thisHnd)
{
JITDUMP("bailing, operand types differ\n");
return nullptr;
}
// If we have a shared type instance we can't safely check type
// equality, so bail.
DWORD classAttribs = info.compCompHnd->getClassAttribs(thisHnd);
if (classAttribs & CORINFO_FLG_SHAREDINST)
{
JITDUMP("bailing, have shared instance type\n");
return nullptr;
}
// Simulate removing the box for thisOP. We need to know that it can
// be safely removed before we can optimize.
GenTree* thisVal = gtTryRemoveBoxUpstreamEffects(thisOp, BR_DONT_REMOVE);
if (thisVal == nullptr)
{
// Note we may fail here if the this operand comes from
// a call. We should be able to retry this post-inlining.
JITDUMP("bailing, can't undo box of 'this' operand\n");
return nullptr;
}
// Do likewise with flagOp.
GenTree* flagVal = gtTryRemoveBoxUpstreamEffects(flagOp, BR_DONT_REMOVE);
if (flagVal == nullptr)
{
// Note we may fail here if the flag operand comes from
// a call. We should be able to retry this post-inlining.
JITDUMP("bailing, can't undo box of 'flag' operand\n");
return nullptr;
}
// Only proceed when both box sources have the same actual type.
// (this rules out long/int mismatches)
if (genActualType(thisVal->TypeGet()) != genActualType(flagVal->TypeGet()))
{
JITDUMP("bailing, pre-boxed values have different types\n");
return nullptr;
}
// Yes, both boxes can be cleaned up. Optimize.
JITDUMP("Optimizing call to Enum.HasFlag\n");
// Undo the boxing of the Ops and prepare to operate directly
// on the pre-boxed values.
thisVal = gtTryRemoveBoxUpstreamEffects(thisOp, BR_REMOVE_BUT_NOT_NARROW);
flagVal = gtTryRemoveBoxUpstreamEffects(flagOp, BR_REMOVE_BUT_NOT_NARROW);
// Our trial removals above should guarantee successful removals here.
assert(thisVal != nullptr);
assert(flagVal != nullptr);
assert(genActualType(thisVal->TypeGet()) == genActualType(flagVal->TypeGet()));
// Type to use for optimized check
var_types type = genActualType(thisVal->TypeGet());
// The thisVal and flagVal trees come from earlier statements.
//
// Unless they are invariant values, we need to evaluate them both
// to temps at those points to safely transmit the values here.
//
// Also we need to use the flag twice, so we need two trees for it.
GenTree* thisValOpt = nullptr;
GenTree* flagValOpt = nullptr;
GenTree* flagValOptCopy = nullptr;
if (thisVal->IsIntegralConst())
{
thisValOpt = gtClone(thisVal);
assert(thisValOpt != nullptr);
}
else
{
const unsigned thisTmp = lvaGrabTemp(true DEBUGARG("Enum:HasFlag this temp"));
GenTree* thisAsg = gtNewTempAssign(thisTmp, thisVal);
Statement* thisAsgStmt = thisOp->AsBox()->gtCopyStmtWhenInlinedBoxValue;
thisAsgStmt->SetRootNode(thisAsg);
thisValOpt = gtNewLclvNode(thisTmp, type);
}
if (flagVal->IsIntegralConst())
{
flagValOpt = gtClone(flagVal);
assert(flagValOpt != nullptr);
flagValOptCopy = gtClone(flagVal);
assert(flagValOptCopy != nullptr);
}
else
{
const unsigned flagTmp = lvaGrabTemp(true DEBUGARG("Enum:HasFlag flag temp"));
GenTree* flagAsg = gtNewTempAssign(flagTmp, flagVal);
Statement* flagAsgStmt = flagOp->AsBox()->gtCopyStmtWhenInlinedBoxValue;
flagAsgStmt->SetRootNode(flagAsg);
flagValOpt = gtNewLclvNode(flagTmp, type);
flagValOptCopy = gtNewLclvNode(flagTmp, type);
}
// Turn the call into (thisValTmp & flagTmp) == flagTmp.
GenTree* andTree = gtNewOperNode(GT_AND, type, thisValOpt, flagValOpt);
GenTree* cmpTree = gtNewOperNode(GT_EQ, TYP_INT, andTree, flagValOptCopy);
JITDUMP("Optimized call to Enum.HasFlag\n");
return cmpTree;
}
/*****************************************************************************
*
* Fold the given constant tree.
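 *
 *  For example, GT_NEG(CNS_INT 5) folds to CNS_INT -5; the switches below cover
 *  the full set of operators, types, and overflow cases.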
*/
#ifdef _PREFAST_
#pragma warning(push)
#pragma warning(disable : 21000) // Suppress PREFast warning about overly large function
#endif
GenTree* Compiler::gtFoldExprConst(GenTree* tree)
{
SSIZE_T i1, i2, itemp;
INT64 lval1, lval2, ltemp;
float f1, f2;
double d1, d2;
var_types switchType;
FieldSeqNode* fieldSeq = FieldSeqStore::NotAField(); // default unless we override it when folding
assert(tree->OperIsUnary() || tree->OperIsBinary());
GenTree* op1 = tree->gtGetOp1();
GenTree* op2 = tree->gtGetOp2IfPresent();
if (!opts.OptEnabled(CLFLG_CONSTANTFOLD))
{
return tree;
}
if (tree->OperIs(GT_NOP, GT_ALLOCOBJ, GT_RUNTIMELOOKUP))
{
return tree;
}
// This condition exists to preserve previous behavior.
// TODO-CQ: enable folding for bounds checks nodes.
if (tree->OperIs(GT_BOUNDS_CHECK))
{
return tree;
}
#ifdef FEATURE_SIMD
if (tree->OperIs(GT_SIMD))
{
return tree;
}
#endif // FEATURE_SIMD
#ifdef FEATURE_HW_INTRINSICS
if (tree->OperIs(GT_HWINTRINSIC))
{
return tree;
}
#endif
if (tree->OperIsUnary())
{
assert(op1->OperIsConst());
switch (op1->TypeGet())
{
case TYP_INT:
// Fold constant INT unary operator.
if (!op1->AsIntCon()->ImmedValCanBeFolded(this, tree->OperGet()))
{
return tree;
}
i1 = (INT32)op1->AsIntCon()->IconValue();
// If we fold a unary oper, then the folded constant
// is considered a ConstantIndexField if op1 was one.
if ((op1->AsIntCon()->gtFieldSeq != nullptr) && op1->AsIntCon()->gtFieldSeq->IsConstantIndexFieldSeq())
{
fieldSeq = op1->AsIntCon()->gtFieldSeq;
}
switch (tree->OperGet())
{
case GT_NOT:
i1 = ~i1;
break;
case GT_NEG:
i1 = -i1;
break;
case GT_BSWAP:
i1 = ((i1 >> 24) & 0xFF) | ((i1 >> 8) & 0xFF00) | ((i1 << 8) & 0xFF0000) |
((i1 << 24) & 0xFF000000);
break;
case GT_BSWAP16:
i1 = ((i1 >> 8) & 0xFF) | ((i1 << 8) & 0xFF00);
break;
case GT_CAST:
// assert (genActualType(tree->CastToType()) == tree->TypeGet());
if (tree->gtOverflow() &&
CheckedOps::CastFromIntOverflows((INT32)i1, tree->CastToType(), tree->IsUnsigned()))
{
goto INTEGRAL_OVF;
}
switch (tree->CastToType())
{
case TYP_BYTE:
i1 = INT32(INT8(i1));
goto CNS_INT;
case TYP_SHORT:
i1 = INT32(INT16(i1));
goto CNS_INT;
case TYP_USHORT:
i1 = INT32(UINT16(i1));
goto CNS_INT;
case TYP_BOOL:
case TYP_UBYTE:
i1 = INT32(UINT8(i1));
goto CNS_INT;
case TYP_UINT:
case TYP_INT:
goto CNS_INT;
case TYP_ULONG:
if (tree->IsUnsigned())
{
lval1 = UINT64(UINT32(i1));
}
else
{
lval1 = UINT64(INT32(i1));
}
goto CNS_LONG;
case TYP_LONG:
if (tree->IsUnsigned())
{
lval1 = INT64(UINT32(i1));
}
else
{
lval1 = INT64(INT32(i1));
}
goto CNS_LONG;
case TYP_FLOAT:
if (tree->IsUnsigned())
{
f1 = forceCastToFloat(UINT32(i1));
}
else
{
f1 = forceCastToFloat(INT32(i1));
}
d1 = f1;
goto CNS_DOUBLE;
case TYP_DOUBLE:
if (tree->IsUnsigned())
{
d1 = (double)UINT32(i1);
}
else
{
d1 = (double)INT32(i1);
}
goto CNS_DOUBLE;
default:
assert(!"Bad CastToType() in gtFoldExprConst() for a cast from int");
return tree;
}
default:
return tree;
}
goto CNS_INT;
case TYP_LONG:
// Fold constant LONG unary operator.
if (!op1->AsIntConCommon()->ImmedValCanBeFolded(this, tree->OperGet()))
{
return tree;
}
lval1 = op1->AsIntConCommon()->LngValue();
switch (tree->OperGet())
{
case GT_NOT:
lval1 = ~lval1;
break;
case GT_NEG:
lval1 = -lval1;
break;
case GT_BSWAP:
lval1 = ((lval1 >> 56) & 0xFF) | ((lval1 >> 40) & 0xFF00) | ((lval1 >> 24) & 0xFF0000) |
((lval1 >> 8) & 0xFF000000) | ((lval1 << 8) & 0xFF00000000) |
((lval1 << 24) & 0xFF0000000000) | ((lval1 << 40) & 0xFF000000000000) |
((lval1 << 56) & 0xFF00000000000000);
break;
case GT_CAST:
assert(tree->TypeIs(genActualType(tree->CastToType())));
if (tree->gtOverflow() &&
CheckedOps::CastFromLongOverflows(lval1, tree->CastToType(), tree->IsUnsigned()))
{
goto INTEGRAL_OVF;
}
switch (tree->CastToType())
{
case TYP_BYTE:
i1 = INT32(INT8(lval1));
goto CNS_INT;
case TYP_SHORT:
i1 = INT32(INT16(lval1));
goto CNS_INT;
case TYP_USHORT:
i1 = INT32(UINT16(lval1));
goto CNS_INT;
case TYP_UBYTE:
i1 = INT32(UINT8(lval1));
goto CNS_INT;
case TYP_INT:
i1 = INT32(lval1);
goto CNS_INT;
case TYP_UINT:
i1 = UINT32(lval1);
goto CNS_INT;
case TYP_ULONG:
case TYP_LONG:
goto CNS_LONG;
case TYP_FLOAT:
case TYP_DOUBLE:
if (tree->IsUnsigned() && (lval1 < 0))
{
d1 = FloatingPointUtils::convertUInt64ToDouble((unsigned __int64)lval1);
}
else
{
d1 = (double)lval1;
}
if (tree->CastToType() == TYP_FLOAT)
{
f1 = forceCastToFloat(d1); // truncate precision
d1 = f1;
}
goto CNS_DOUBLE;
default:
assert(!"Bad CastToType() in gtFoldExprConst() for a cast from long");
return tree;
}
default:
return tree;
}
goto CNS_LONG;
case TYP_FLOAT:
case TYP_DOUBLE:
assert(op1->OperIs(GT_CNS_DBL));
// Fold constant DOUBLE unary operator.
d1 = op1->AsDblCon()->gtDconVal;
switch (tree->OperGet())
{
case GT_NEG:
d1 = -d1;
break;
case GT_CAST:
f1 = forceCastToFloat(d1);
if ((op1->TypeIs(TYP_DOUBLE) && CheckedOps::CastFromDoubleOverflows(d1, tree->CastToType())) ||
(op1->TypeIs(TYP_FLOAT) && CheckedOps::CastFromFloatOverflows(f1, tree->CastToType())))
{
// The conversion overflows. The ECMA spec says, in III 3.27, that
// "...if overflow occurs converting a floating point type to an integer, ...,
// the value returned is unspecified." However, it would at least be
// desirable to have the same value returned for casting an overflowing
// constant to an int as would be obtained by passing that constant as
// a parameter and then casting that parameter to an int type.
                        // Don't fold overflowing conversions, as the value returned by
// JIT's codegen doesn't always match with the C compiler's cast result.
// We want the behavior to be the same with or without folding.
return tree;
}
assert(tree->TypeIs(genActualType(tree->CastToType())));
switch (tree->CastToType())
{
case TYP_BYTE:
i1 = INT32(INT8(d1));
goto CNS_INT;
case TYP_SHORT:
i1 = INT32(INT16(d1));
goto CNS_INT;
case TYP_USHORT:
i1 = INT32(UINT16(d1));
goto CNS_INT;
case TYP_UBYTE:
i1 = INT32(UINT8(d1));
goto CNS_INT;
case TYP_INT:
i1 = INT32(d1);
goto CNS_INT;
case TYP_UINT:
i1 = forceCastToUInt32(d1);
goto CNS_INT;
case TYP_LONG:
lval1 = INT64(d1);
goto CNS_LONG;
case TYP_ULONG:
lval1 = FloatingPointUtils::convertDoubleToUInt64(d1);
goto CNS_LONG;
case TYP_FLOAT:
d1 = forceCastToFloat(d1);
goto CNS_DOUBLE;
case TYP_DOUBLE:
if (op1->TypeIs(TYP_FLOAT))
{
d1 = forceCastToFloat(d1); // Truncate precision.
}
goto CNS_DOUBLE; // Redundant cast.
default:
assert(!"Bad CastToType() in gtFoldExprConst() for a cast from double/float");
break;
}
return tree;
default:
return tree;
}
goto CNS_DOUBLE;
default:
                // Not a foldable type - e.g. RET const.
return tree;
}
}
// We have a binary operator.
assert(tree->OperIsBinary());
assert(op2 != nullptr);
assert(op1->OperIsConst());
assert(op2->OperIsConst());
if (tree->OperIs(GT_COMMA))
{
return op2;
}
switchType = op1->TypeGet();
// Normally we will just switch on op1 types, but for the case where
// only op2 is a GC type and op1 is not a GC type, we use the op2 type.
// This makes us handle this as a case of folding for GC type.
if (varTypeIsGC(op2->gtType) && !varTypeIsGC(op1->gtType))
{
switchType = op2->TypeGet();
}
switch (switchType)
{
        // Fold constant REF or BYREF binary operator.
// These can only be comparisons or null pointers.
case TYP_REF:
// String nodes are an RVA at this point.
if (op1->OperIs(GT_CNS_STR) || op2->OperIs(GT_CNS_STR))
{
// Fold "ldstr" ==/!= null.
if (op2->IsIntegralConst(0))
{
if (tree->OperIs(GT_EQ))
{
i1 = 0;
goto FOLD_COND;
}
if (tree->OperIs(GT_NE) || (tree->OperIs(GT_GT) && tree->IsUnsigned()))
{
i1 = 1;
goto FOLD_COND;
}
}
return tree;
}
FALLTHROUGH;
case TYP_BYREF:
i1 = op1->AsIntConCommon()->IconValue();
i2 = op2->AsIntConCommon()->IconValue();
switch (tree->OperGet())
{
case GT_EQ:
i1 = (i1 == i2);
goto FOLD_COND;
case GT_NE:
i1 = (i1 != i2);
goto FOLD_COND;
case GT_ADD:
noway_assert(!tree->TypeIs(TYP_REF));
// We only fold a GT_ADD that involves a null reference.
if ((op1->TypeIs(TYP_REF) && (i1 == 0)) || (op2->TypeIs(TYP_REF) && (i2 == 0)))
{
JITDUMP("\nFolding operator with constant nodes into a constant:\n");
DISPTREE(tree);
// Fold into GT_IND of null byref.
tree->BashToConst(0, TYP_BYREF);
if (vnStore != nullptr)
{
fgValueNumberTreeConst(tree);
}
JITDUMP("\nFolded to null byref:\n");
DISPTREE(tree);
goto DONE;
}
break;
default:
break;
}
return tree;
// Fold constant INT binary operator.
case TYP_INT:
assert(tree->TypeIs(TYP_INT) || varTypeIsGC(tree) || tree->OperIs(GT_MKREFANY));
// No GC pointer types should be folded here...
assert(!varTypeIsGC(op1->TypeGet()) && !varTypeIsGC(op2->TypeGet()));
if (!op1->AsIntConCommon()->ImmedValCanBeFolded(this, tree->OperGet()))
{
return tree;
}
if (!op2->AsIntConCommon()->ImmedValCanBeFolded(this, tree->OperGet()))
{
return tree;
}
i1 = op1->AsIntConCommon()->IconValue();
i2 = op2->AsIntConCommon()->IconValue();
switch (tree->OperGet())
{
case GT_EQ:
i1 = (INT32(i1) == INT32(i2));
break;
case GT_NE:
i1 = (INT32(i1) != INT32(i2));
break;
case GT_LT:
if (tree->IsUnsigned())
{
i1 = (UINT32(i1) < UINT32(i2));
}
else
{
i1 = (INT32(i1) < INT32(i2));
}
break;
case GT_LE:
if (tree->IsUnsigned())
{
i1 = (UINT32(i1) <= UINT32(i2));
}
else
{
i1 = (INT32(i1) <= INT32(i2));
}
break;
case GT_GE:
if (tree->IsUnsigned())
{
i1 = (UINT32(i1) >= UINT32(i2));
}
else
{
i1 = (INT32(i1) >= INT32(i2));
}
break;
case GT_GT:
if (tree->IsUnsigned())
{
i1 = (UINT32(i1) > UINT32(i2));
}
else
{
i1 = (INT32(i1) > INT32(i2));
}
break;
case GT_ADD:
itemp = i1 + i2;
if (tree->gtOverflow() && CheckedOps::AddOverflows(INT32(i1), INT32(i2), tree->IsUnsigned()))
{
goto INTEGRAL_OVF;
}
i1 = itemp;
fieldSeq = GetFieldSeqStore()->Append(op1->AsIntCon()->gtFieldSeq, op2->AsIntCon()->gtFieldSeq);
break;
case GT_SUB:
itemp = i1 - i2;
if (tree->gtOverflow() && CheckedOps::SubOverflows(INT32(i1), INT32(i2), tree->IsUnsigned()))
{
goto INTEGRAL_OVF;
}
i1 = itemp;
break;
case GT_MUL:
itemp = i1 * i2;
if (tree->gtOverflow() && CheckedOps::MulOverflows(INT32(i1), INT32(i2), tree->IsUnsigned()))
{
goto INTEGRAL_OVF;
}
// For the very particular case of the "constant array index" pseudo-field, we
// assume that multiplication is by the field width, and preserves that field.
// This could obviously be made more robust by a more complicated set of annotations...
if ((op1->AsIntCon()->gtFieldSeq != nullptr) &&
op1->AsIntCon()->gtFieldSeq->IsConstantIndexFieldSeq())
{
assert(op2->AsIntCon()->gtFieldSeq == FieldSeqStore::NotAField());
fieldSeq = op1->AsIntCon()->gtFieldSeq;
}
else if ((op2->AsIntCon()->gtFieldSeq != nullptr) &&
op2->AsIntCon()->gtFieldSeq->IsConstantIndexFieldSeq())
{
assert(op1->AsIntCon()->gtFieldSeq == FieldSeqStore::NotAField());
fieldSeq = op2->AsIntCon()->gtFieldSeq;
}
i1 = itemp;
break;
case GT_OR:
i1 |= i2;
break;
case GT_XOR:
i1 ^= i2;
break;
case GT_AND:
i1 &= i2;
break;
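                // The shift count is masked to 5 bits below, so e.g. (illustrative) a count of 33
                // folds the same way as a count of 1.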
case GT_LSH:
i1 <<= (i2 & 0x1f);
break;
case GT_RSH:
i1 >>= (i2 & 0x1f);
break;
case GT_RSZ:
// logical shift -> make it unsigned to not propagate the sign bit.
i1 = UINT32(i1) >> (i2 & 0x1f);
break;
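                // Rotates are folded as a pair of shifts; e.g. (illustrative) ROL(0x80000001, 1)
                // folds to 0x00000003 and ROR(0x00000003, 1) folds back to 0x80000001.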
case GT_ROL:
i1 = (i1 << (i2 & 0x1f)) | (UINT32(i1) >> ((32 - i2) & 0x1f));
break;
case GT_ROR:
i1 = (i1 << ((32 - i2) & 0x1f)) | (UINT32(i1) >> (i2 & 0x1f));
break;
// DIV and MOD can throw an exception - if the division is by 0
// or there is overflow - when dividing MIN by -1.
case GT_DIV:
case GT_MOD:
case GT_UDIV:
case GT_UMOD:
if (INT32(i2) == 0)
{
// Division by zero.
// We have to evaluate this expression and throw an exception.
return tree;
}
else if ((INT32(i2) == -1) && (UINT32(i1) == 0x80000000))
{
// Overflow Division.
// We have to evaluate this expression and throw an exception.
return tree;
}
if (tree->OperIs(GT_DIV))
{
i1 = INT32(i1) / INT32(i2);
}
else if (tree->OperIs(GT_MOD))
{
i1 = INT32(i1) % INT32(i2);
}
else if (tree->OperIs(GT_UDIV))
{
i1 = UINT32(i1) / UINT32(i2);
}
else
{
assert(tree->OperIs(GT_UMOD));
i1 = UINT32(i1) % UINT32(i2);
}
break;
default:
return tree;
}
// We get here after folding to a GT_CNS_INT type.
// change the node to the new type / value and make sure the node sizes are OK.
CNS_INT:
FOLD_COND:
JITDUMP("\nFolding operator with constant nodes into a constant:\n");
DISPTREE(tree);
// Also all conditional folding jumps here since the node hanging from
// GT_JTRUE has to be a GT_CNS_INT - value 0 or 1.
// Some operations are performed as 64 bit instead of 32 bit so the upper 32 bits
// need to be discarded. Since constant values are stored as ssize_t and the node
// has TYP_INT the result needs to be sign extended rather than zero extended.
tree->BashToConst(static_cast<int>(i1));
tree->AsIntCon()->gtFieldSeq = fieldSeq;
if (vnStore != nullptr)
{
fgValueNumberTreeConst(tree);
}
JITDUMP("Bashed to int constant:\n");
DISPTREE(tree);
goto DONE;
// Fold constant LONG binary operator.
case TYP_LONG:
// No GC pointer types should be folded here...
assert(!varTypeIsGC(op1->TypeGet()) && !varTypeIsGC(op2->TypeGet()));
// op1 is known to be a TYP_LONG, op2 is normally a TYP_LONG, unless we have a shift operator in which case
// it is a TYP_INT.
assert(op2->TypeIs(TYP_LONG, TYP_INT));
if (!op1->AsIntConCommon()->ImmedValCanBeFolded(this, tree->OperGet()))
{
return tree;
}
if (!op2->AsIntConCommon()->ImmedValCanBeFolded(this, tree->OperGet()))
{
return tree;
}
lval1 = op1->AsIntConCommon()->LngValue();
// For the shift operators we can have a op2 that is a TYP_INT.
// Thus we cannot just use LngValue(), as it will assert on 32 bit if op2 is not GT_CNS_LNG.
lval2 = op2->AsIntConCommon()->IntegralValue();
switch (tree->OperGet())
{
case GT_EQ:
i1 = (lval1 == lval2);
goto FOLD_COND;
case GT_NE:
i1 = (lval1 != lval2);
goto FOLD_COND;
case GT_LT:
if (tree->IsUnsigned())
{
i1 = (UINT64(lval1) < UINT64(lval2));
}
else
{
i1 = (lval1 < lval2);
}
goto FOLD_COND;
case GT_LE:
if (tree->IsUnsigned())
{
i1 = (UINT64(lval1) <= UINT64(lval2));
}
else
{
i1 = (lval1 <= lval2);
}
goto FOLD_COND;
case GT_GE:
if (tree->IsUnsigned())
{
i1 = (UINT64(lval1) >= UINT64(lval2));
}
else
{
i1 = (lval1 >= lval2);
}
goto FOLD_COND;
case GT_GT:
if (tree->IsUnsigned())
{
i1 = (UINT64(lval1) > UINT64(lval2));
}
else
{
i1 = (lval1 > lval2);
}
goto FOLD_COND;
case GT_ADD:
ltemp = lval1 + lval2;
if (tree->gtOverflow() && CheckedOps::AddOverflows(lval1, lval2, tree->IsUnsigned()))
{
goto INTEGRAL_OVF;
}
lval1 = ltemp;
#ifdef TARGET_64BIT
fieldSeq = GetFieldSeqStore()->Append(op1->AsIntCon()->gtFieldSeq, op2->AsIntCon()->gtFieldSeq);
#endif
break;
case GT_SUB:
ltemp = lval1 - lval2;
if (tree->gtOverflow() && CheckedOps::SubOverflows(lval1, lval2, tree->IsUnsigned()))
{
goto INTEGRAL_OVF;
}
lval1 = ltemp;
break;
case GT_MUL:
ltemp = lval1 * lval2;
if (tree->gtOverflow() && CheckedOps::MulOverflows(lval1, lval2, tree->IsUnsigned()))
{
goto INTEGRAL_OVF;
}
lval1 = ltemp;
break;
case GT_OR:
lval1 |= lval2;
break;
case GT_XOR:
lval1 ^= lval2;
break;
case GT_AND:
lval1 &= lval2;
break;
case GT_LSH:
lval1 <<= (lval2 & 0x3f);
break;
case GT_RSH:
lval1 >>= (lval2 & 0x3f);
break;
case GT_RSZ:
// logical shift -> make it unsigned to not propagate the sign bit.
lval1 = UINT64(lval1) >> (lval2 & 0x3f);
break;
case GT_ROL:
lval1 = (lval1 << (lval2 & 0x3f)) | (UINT64(lval1) >> ((64 - lval2) & 0x3f));
break;
case GT_ROR:
lval1 = (lval1 << ((64 - lval2) & 0x3f)) | (UINT64(lval1) >> (lval2 & 0x3f));
break;
// Both DIV and IDIV on x86 raise an exception for min_int (and min_long) / -1. So we preserve
// that behavior here.
case GT_DIV:
if (lval2 == 0)
{
return tree;
}
if ((UINT64(lval1) == UINT64(0x8000000000000000)) && (lval2 == INT64(-1)))
{
return tree;
}
lval1 /= lval2;
break;
case GT_MOD:
if (lval2 == 0)
{
return tree;
}
if ((UINT64(lval1) == UINT64(0x8000000000000000)) && (lval2 == INT64(-1)))
{
return tree;
}
lval1 %= lval2;
break;
case GT_UDIV:
if (lval2 == 0)
{
return tree;
}
if ((UINT64(lval1) == UINT64(0x8000000000000000)) && (lval2 == INT64(-1)))
{
return tree;
}
lval1 = UINT64(lval1) / UINT64(lval2);
break;
case GT_UMOD:
if (lval2 == 0)
{
return tree;
}
if ((UINT64(lval1) == UINT64(0x8000000000000000)) && (lval2 == INT64(-1)))
{
return tree;
}
lval1 = UINT64(lval1) % UINT64(lval2);
break;
default:
return tree;
}
CNS_LONG:
#if !defined(TARGET_64BIT)
if (fieldSeq != FieldSeqStore::NotAField())
{
assert(!"Field sequences on CNS_LNG nodes!?");
return tree;
}
#endif // !defined(TARGET_64BIT)
JITDUMP("\nFolding long operator with constant nodes into a constant:\n");
DISPTREE(tree);
assert((GenTree::s_gtNodeSizes[GT_CNS_NATIVELONG] == TREE_NODE_SZ_SMALL) ||
(tree->gtDebugFlags & GTF_DEBUG_NODE_LARGE));
tree->BashToConst(lval1);
#ifdef TARGET_64BIT
tree->AsIntCon()->gtFieldSeq = fieldSeq;
#endif
if (vnStore != nullptr)
{
fgValueNumberTreeConst(tree);
}
JITDUMP("Bashed to long constant:\n");
DISPTREE(tree);
goto DONE;
// Fold constant FLOAT or DOUBLE binary operator
case TYP_FLOAT:
case TYP_DOUBLE:
if (tree->gtOverflowEx())
{
return tree;
}
assert(op1->OperIs(GT_CNS_DBL));
d1 = op1->AsDblCon()->gtDconVal;
assert(varTypeIsFloating(op2->TypeGet()));
assert(op2->OperIs(GT_CNS_DBL));
d2 = op2->AsDblCon()->gtDconVal;
// Special case - check if we have NaN operands.
            // For comparisons, if this is not an unordered operation, the result is always false - return 0.
            // For unordered operations (i.e. the GTF_RELOP_NAN_UN flag is set)
            // the result is always true - return 1.
if (_isnan(d1) || _isnan(d2))
{
JITDUMP("Double operator(s) is NaN\n");
if (tree->OperIsCompare())
{
if (tree->gtFlags & GTF_RELOP_NAN_UN)
{
// Unordered comparison with NaN always succeeds.
i1 = 1;
goto FOLD_COND;
}
else
{
// Normal comparison with NaN always fails.
i1 = 0;
goto FOLD_COND;
}
}
}
switch (tree->OperGet())
{
case GT_EQ:
i1 = (d1 == d2);
goto FOLD_COND;
case GT_NE:
i1 = (d1 != d2);
goto FOLD_COND;
case GT_LT:
i1 = (d1 < d2);
goto FOLD_COND;
case GT_LE:
i1 = (d1 <= d2);
goto FOLD_COND;
case GT_GE:
i1 = (d1 >= d2);
goto FOLD_COND;
case GT_GT:
i1 = (d1 > d2);
goto FOLD_COND;
                // Floating point arithmetic should be done in the declared
                // precision while doing constant folding. For this reason, even though TYP_FLOAT
                // constants are stored as double constants, double constants should be converted
                // to float while performing float arithmetic. Here is an example case
// where performing arithmetic in double precision would lead to incorrect
// results.
//
// Example:
// float a = float.MaxValue;
// float b = a*a; This will produce +inf in single precision and 1.1579207543382391e+077 in double
// precision.
                // float c = b/b; This will produce NaN in single precision and 1 in double precision.
case GT_ADD:
if (op1->TypeIs(TYP_FLOAT))
{
f1 = forceCastToFloat(d1);
f2 = forceCastToFloat(d2);
d1 = forceCastToFloat(f1 + f2);
}
else
{
d1 += d2;
}
break;
case GT_SUB:
if (op1->TypeIs(TYP_FLOAT))
{
f1 = forceCastToFloat(d1);
f2 = forceCastToFloat(d2);
d1 = forceCastToFloat(f1 - f2);
}
else
{
d1 -= d2;
}
break;
case GT_MUL:
if (op1->TypeIs(TYP_FLOAT))
{
f1 = forceCastToFloat(d1);
f2 = forceCastToFloat(d2);
d1 = forceCastToFloat(f1 * f2);
}
else
{
d1 *= d2;
}
break;
case GT_DIV:
// We do not fold division by zero, even for floating point.
// This is because the result will be platform-dependent for an expression like 0d / 0d.
if (d2 == 0)
{
return tree;
}
if (op1->TypeIs(TYP_FLOAT))
{
f1 = forceCastToFloat(d1);
f2 = forceCastToFloat(d2);
d1 = forceCastToFloat(f1 / f2);
}
else
{
d1 /= d2;
}
break;
default:
return tree;
}
CNS_DOUBLE:
JITDUMP("\nFolding fp operator with constant nodes into a fp constant:\n");
DISPTREE(tree);
assert((GenTree::s_gtNodeSizes[GT_CNS_DBL] == TREE_NODE_SZ_SMALL) ||
(tree->gtDebugFlags & GTF_DEBUG_NODE_LARGE));
tree->BashToConst(d1, tree->TypeGet());
if (vnStore != nullptr)
{
fgValueNumberTreeConst(tree);
}
JITDUMP("Bashed to fp constant:\n");
DISPTREE(tree);
goto DONE;
default:
// Not a foldable type.
return tree;
}
DONE:
// Make sure no side effect flags are set on this constant node.
tree->gtFlags &= ~GTF_ALL_EFFECT;
return tree;
INTEGRAL_OVF:
// This operation is going to cause an overflow exception. Morph into
// an overflow helper. Put a dummy constant value for code generation.
//
// We could remove all subsequent trees in the current basic block,
// unless this node is a child of GT_COLON
//
// NOTE: Since the folded value is not constant we should not change the
// "tree" node - otherwise we confuse the logic that checks if the folding
// was successful - instead use one of the operands, e.g. op1.
// Don't fold overflow operations if not global morph phase.
// The reason for this is that this optimization is replacing a gentree node
// with another new gentree node. Say a GT_CALL(arglist) has one 'arg'
// involving overflow arithmetic. During assertion prop, it is possible
// that the 'arg' could be constant folded and the result could lead to an
// overflow. In such a case 'arg' will get replaced with GT_COMMA node
// but fgMorphArgs() - see the logic around "if(lateArgsComputed)" - doesn't
// update args table. For this reason this optimization is enabled only
// for global morphing phase.
//
// TODO-CQ: Once fgMorphArgs() is fixed this restriction could be removed.
if (!fgGlobalMorph)
{
assert(tree->gtOverflow());
return tree;
}
var_types type = genActualType(tree->TypeGet());
op1 = type == TYP_LONG ? gtNewLconNode(0) : gtNewIconNode(0);
if (vnStore != nullptr)
{
op1->gtVNPair.SetBoth(vnStore->VNZeroForType(type));
}
JITDUMP("\nFolding binary operator with constant nodes into a comma throw:\n");
DISPTREE(tree);
// We will change the cast to a GT_COMMA and attach the exception helper as AsOp()->gtOp1.
// The constant expression zero becomes op2.
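    // (Illustrative shape of the result: COMMA(CALL CORINFO_HELP_OVERFLOW, CNS 0), typed like the original tree.)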
assert(tree->gtOverflow());
assert(tree->OperIs(GT_ADD, GT_SUB, GT_CAST, GT_MUL));
assert(op1 != nullptr);
op2 = op1;
op1 = gtNewHelperCallNode(CORINFO_HELP_OVERFLOW, TYP_VOID, gtNewCallArgs(gtNewIconNode(compCurBB->bbTryIndex)));
// op1 is a call to the JIT helper that throws an Overflow exception.
// Attach the ExcSet for VNF_OverflowExc(Void) to this call.
if (vnStore != nullptr)
{
op1->gtVNPair = vnStore->VNPWithExc(ValueNumPair(ValueNumStore::VNForVoid(), ValueNumStore::VNForVoid()),
vnStore->VNPExcSetSingleton(vnStore->VNPairForFunc(TYP_REF, VNF_OverflowExc,
vnStore->VNPForVoid())));
}
tree = gtNewOperNode(GT_COMMA, tree->TypeGet(), op1, op2);
return tree;
}
#ifdef _PREFAST_
#pragma warning(pop)
#endif
//------------------------------------------------------------------------
// gtNewTempAssign: Create an assignment of the given value to a temp.
//
// Arguments:
// tmp - local number for a compiler temp
// val - value to assign to the temp
// pAfterStmt - statement to insert any additional statements after
// ilOffset - il offset for new statements
// block - block to insert any additional statements in
//
// Return Value:
// Normally a new assignment node.
// However may return a nop node if val is simply a reference to the temp.
//
// Notes:
// Self-assignments may be represented via NOPs.
//
// May update the type of the temp, if it was previously unknown.
//
// May set compFloatingPointUsed.
GenTree* Compiler::gtNewTempAssign(
unsigned tmp, GenTree* val, Statement** pAfterStmt, const DebugInfo& di, BasicBlock* block)
{
// Self-assignment is a nop.
if (val->OperGet() == GT_LCL_VAR && val->AsLclVarCommon()->GetLclNum() == tmp)
{
return gtNewNothingNode();
}
LclVarDsc* varDsc = lvaGetDesc(tmp);
if (varDsc->TypeGet() == TYP_I_IMPL && val->TypeGet() == TYP_BYREF)
{
impBashVarAddrsToI(val);
}
var_types valTyp = val->TypeGet();
if (val->OperGet() == GT_LCL_VAR && lvaTable[val->AsLclVar()->GetLclNum()].lvNormalizeOnLoad())
{
valTyp = lvaGetRealType(val->AsLclVar()->GetLclNum());
val->gtType = valTyp;
}
var_types dstTyp = varDsc->TypeGet();
/* If the variable's lvType is not yet set then set it here */
if (dstTyp == TYP_UNDEF)
{
varDsc->lvType = dstTyp = genActualType(valTyp);
#if FEATURE_SIMD
if (varTypeIsSIMD(dstTyp))
{
varDsc->lvSIMDType = 1;
}
#endif
}
#ifdef DEBUG
// Make sure the actual types match.
if (genActualType(valTyp) != genActualType(dstTyp))
{
// Plus some other exceptions that are apparently legal:
// 1) TYP_REF or BYREF = TYP_I_IMPL
bool ok = false;
if (varTypeIsGC(dstTyp) && (valTyp == TYP_I_IMPL))
{
ok = true;
}
// 2) TYP_DOUBLE = TYP_FLOAT or TYP_FLOAT = TYP_DOUBLE
else if (varTypeIsFloating(dstTyp) && varTypeIsFloating(valTyp))
{
ok = true;
}
// 3) TYP_BYREF = TYP_REF when object stack allocation is enabled
else if (JitConfig.JitObjectStackAllocation() && (dstTyp == TYP_BYREF) && (valTyp == TYP_REF))
{
ok = true;
}
else if (!varTypeIsGC(dstTyp) && (genTypeSize(valTyp) == genTypeSize(dstTyp)))
{
// We can have assignments that require a change of register file, e.g. for arguments
// and call returns. Lowering and Codegen will handle these.
ok = true;
}
else if ((dstTyp == TYP_STRUCT) && (valTyp == TYP_INT))
{
// It could come from `ASG(struct, 0)` that was propagated to `RETURN struct(0)`,
// and now it is merging to a struct again.
assert(tmp == genReturnLocal);
ok = true;
}
else if (varTypeIsSIMD(dstTyp) && (valTyp == TYP_STRUCT))
{
assert(val->IsCall());
ok = true;
}
if (!ok)
{
gtDispTree(val);
assert(!"Incompatible types for gtNewTempAssign");
}
}
#endif
// Added this noway_assert for runtime\issue 44895, to protect against silent bad codegen
//
if ((dstTyp == TYP_STRUCT) && (valTyp == TYP_REF))
{
noway_assert(!"Incompatible types for gtNewTempAssign");
}
// Floating Point assignments can be created during inlining
// see "Zero init inlinee locals:" in fgInlinePrependStatements
// thus we may need to set compFloatingPointUsed to true here.
//
if (varTypeUsesFloatReg(dstTyp) && (compFloatingPointUsed == false))
{
compFloatingPointUsed = true;
}
/* Create the assignment node */
GenTree* asg;
GenTree* dest = gtNewLclvNode(tmp, dstTyp);
dest->gtFlags |= GTF_VAR_DEF;
// With first-class structs, we should be propagating the class handle on all non-primitive
// struct types. We don't have a convenient way to do that for all SIMD temps, since some
// internal trees use SIMD types that are not used by the input IL. In this case, we allow
// a null type handle and derive the necessary information about the type from its varType.
CORINFO_CLASS_HANDLE valStructHnd = gtGetStructHandleIfPresent(val);
if (varTypeIsStruct(varDsc) && (valStructHnd == NO_CLASS_HANDLE) && !varTypeIsSIMD(valTyp))
{
// There are 2 special cases:
// 1. we have lost classHandle from a FIELD node because the parent struct has overlapping fields,
        //    the field was transformed as IND or GT_LCL_FLD;
        // 2. we are propagating `ASG(struct V01, 0)` to `RETURN(struct V01)`, `CNS_INT` doesn't have a `structHnd`;
// in these cases, we can use the type of the merge return for the assignment.
assert(val->gtEffectiveVal(true)->OperIs(GT_IND, GT_LCL_FLD, GT_CNS_INT));
assert(tmp == genReturnLocal);
valStructHnd = lvaGetStruct(genReturnLocal);
assert(valStructHnd != NO_CLASS_HANDLE);
}
if ((valStructHnd != NO_CLASS_HANDLE) && val->IsConstInitVal())
{
asg = gtNewAssignNode(dest, val);
}
else if (varTypeIsStruct(varDsc) && ((valStructHnd != NO_CLASS_HANDLE) || varTypeIsSIMD(valTyp)))
{
        // The struct value may be a child of a GT_COMMA due to explicit null checks of indirs/fields.
GenTree* valx = val->gtEffectiveVal(/*commaOnly*/ true);
if (valStructHnd != NO_CLASS_HANDLE)
{
lvaSetStruct(tmp, valStructHnd, false);
}
else
{
assert(valx->gtOper != GT_OBJ);
}
dest->gtFlags |= GTF_DONT_CSE;
valx->gtFlags |= GTF_DONT_CSE;
asg = impAssignStruct(dest, val, valStructHnd, (unsigned)CHECK_SPILL_NONE, pAfterStmt, di, block);
}
else
{
// We may have a scalar type variable assigned a struct value, e.g. a 'genReturnLocal'
// when the ABI calls for returning a struct as a primitive type.
// TODO-1stClassStructs: When we stop "lying" about the types for ABI purposes, the
// 'genReturnLocal' should be the original struct type.
assert(!varTypeIsStruct(valTyp) || ((valStructHnd != NO_CLASS_HANDLE) &&
(typGetObjLayout(valStructHnd)->GetSize() == genTypeSize(varDsc))));
asg = gtNewAssignNode(dest, val);
}
if (compRationalIRForm)
{
Rationalizer::RewriteAssignmentIntoStoreLcl(asg->AsOp());
}
return asg;
}
/*****************************************************************************
*
* Create a helper call to access a COM field (iff 'assg' is non-zero this is
* an assignment and 'assg' is the new value).
*/
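// Note (descriptive): depending on the field accessor kind, the helper either performs the field
// access itself or returns the field's address; the code below adds whatever cast, indirection or
// assignment is needed to produce (or store) a value of 'lclTyp'.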
GenTree* Compiler::gtNewRefCOMfield(GenTree* objPtr,
CORINFO_RESOLVED_TOKEN* pResolvedToken,
CORINFO_ACCESS_FLAGS access,
CORINFO_FIELD_INFO* pFieldInfo,
var_types lclTyp,
CORINFO_CLASS_HANDLE structType,
GenTree* assg)
{
assert(pFieldInfo->fieldAccessor == CORINFO_FIELD_INSTANCE_HELPER ||
pFieldInfo->fieldAccessor == CORINFO_FIELD_INSTANCE_ADDR_HELPER ||
pFieldInfo->fieldAccessor == CORINFO_FIELD_STATIC_ADDR_HELPER);
/* If we can't access it directly, we need to call a helper function */
GenTreeCall::Use* args = nullptr;
var_types helperType = TYP_BYREF;
if (pFieldInfo->fieldAccessor == CORINFO_FIELD_INSTANCE_HELPER)
{
if (access & CORINFO_ACCESS_SET)
{
assert(assg != nullptr);
// helper needs pointer to struct, not struct itself
if (pFieldInfo->helper == CORINFO_HELP_SETFIELDSTRUCT)
{
assert(structType != nullptr);
assg = impGetStructAddr(assg, structType, (unsigned)CHECK_SPILL_ALL, true);
}
else if (lclTyp == TYP_DOUBLE && assg->TypeGet() == TYP_FLOAT)
{
assg = gtNewCastNode(TYP_DOUBLE, assg, false, TYP_DOUBLE);
}
else if (lclTyp == TYP_FLOAT && assg->TypeGet() == TYP_DOUBLE)
{
assg = gtNewCastNode(TYP_FLOAT, assg, false, TYP_FLOAT);
}
args = gtNewCallArgs(assg);
helperType = TYP_VOID;
}
else if (access & CORINFO_ACCESS_GET)
{
helperType = lclTyp;
// The calling convention for the helper does not take into
// account optimization of primitive structs.
if ((pFieldInfo->helper == CORINFO_HELP_GETFIELDSTRUCT) && !varTypeIsStruct(lclTyp))
{
helperType = TYP_STRUCT;
}
}
}
if (pFieldInfo->helper == CORINFO_HELP_GETFIELDSTRUCT || pFieldInfo->helper == CORINFO_HELP_SETFIELDSTRUCT)
{
assert(pFieldInfo->structType != nullptr);
args = gtPrependNewCallArg(gtNewIconEmbClsHndNode(pFieldInfo->structType), args);
}
GenTree* fieldHnd = impTokenToHandle(pResolvedToken);
if (fieldHnd == nullptr)
{ // compDonotInline()
return nullptr;
}
args = gtPrependNewCallArg(fieldHnd, args);
// If it's a static field, we shouldn't have an object node
// If it's an instance field, we have an object node
assert((pFieldInfo->fieldAccessor != CORINFO_FIELD_STATIC_ADDR_HELPER) ^ (objPtr == nullptr));
if (objPtr != nullptr)
{
args = gtPrependNewCallArg(objPtr, args);
}
GenTreeCall* call = gtNewHelperCallNode(pFieldInfo->helper, genActualType(helperType), args);
#if FEATURE_MULTIREG_RET
if (varTypeIsStruct(call))
{
call->InitializeStructReturnType(this, structType, call->GetUnmanagedCallConv());
}
#endif // FEATURE_MULTIREG_RET
GenTree* result = call;
if (pFieldInfo->fieldAccessor == CORINFO_FIELD_INSTANCE_HELPER)
{
if (access & CORINFO_ACCESS_GET)
{
if (pFieldInfo->helper == CORINFO_HELP_GETFIELDSTRUCT)
{
if (!varTypeIsStruct(lclTyp))
{
// get the result as primitive type
result = impGetStructAddr(result, structType, (unsigned)CHECK_SPILL_ALL, true);
result = gtNewOperNode(GT_IND, lclTyp, result);
}
}
else if (varTypeIsIntegral(lclTyp) && genTypeSize(lclTyp) < genTypeSize(TYP_INT))
{
// The helper does not extend the small return types.
result = gtNewCastNode(genActualType(lclTyp), result, false, lclTyp);
}
}
}
else
{
// OK, now do the indirection
if (access & CORINFO_ACCESS_GET)
{
if (varTypeIsStruct(lclTyp))
{
result = gtNewObjNode(structType, result);
}
else
{
result = gtNewOperNode(GT_IND, lclTyp, result);
}
result->gtFlags |= (GTF_EXCEPT | GTF_GLOB_REF);
}
else if (access & CORINFO_ACCESS_SET)
{
if (varTypeIsStruct(lclTyp))
{
result = impAssignStructPtr(result, assg, structType, (unsigned)CHECK_SPILL_ALL);
}
else
{
result = gtNewOperNode(GT_IND, lclTyp, result);
result->gtFlags |= (GTF_EXCEPT | GTF_GLOB_REF | GTF_IND_TGTANYWHERE);
result = gtNewAssignNode(result, assg);
}
}
}
return result;
}
/*****************************************************************************
*
* Return true if the given node (excluding children trees) contains side effects.
* Note that it does not recurse, and children need to be handled separately.
* It may return false even if the node has GTF_SIDE_EFFECT (because of its children).
*
* Similar to OperMayThrow() (but handles GT_CALLs specially), but considers
* assignments too.
*/
bool Compiler::gtNodeHasSideEffects(GenTree* tree, GenTreeFlags flags)
{
if (flags & GTF_ASG)
{
// TODO-Bug: This only checks for GT_ASG/GT_STORE_DYN_BLK but according to OperRequiresAsgFlag
// there are many more opers that are considered to have an assignment side effect: atomic ops
// (GT_CMPXCHG & co.), GT_MEMORYBARRIER (not classified as an atomic op) and HW intrinsic
// memory stores. Atomic ops have special handling in gtExtractSideEffList but the others
        // will simply be dropped if they are ever subject to an "extract side effects" operation.
// It is possible that the reason no bugs have yet been observed in this area is that the
// other nodes are likely to always be tree roots.
if (tree->OperIs(GT_ASG, GT_STORE_DYN_BLK))
{
return true;
}
}
// Are there only GTF_CALL side effects remaining? (and no other side effect kinds)
if (flags & GTF_CALL)
{
if (tree->OperGet() == GT_CALL)
{
GenTreeCall* const call = tree->AsCall();
const bool ignoreExceptions = (flags & GTF_EXCEPT) == 0;
const bool ignoreCctors = (flags & GTF_IS_IN_CSE) != 0; // We can CSE helpers that run cctors.
if (!call->HasSideEffects(this, ignoreExceptions, ignoreCctors))
{
// If this call is otherwise side effect free, check its arguments.
for (GenTreeCall::Use& use : call->Args())
{
if (gtTreeHasSideEffects(use.GetNode(), flags))
{
return true;
}
}
// I'm a little worried that args that assign to temps that are late args will look like
// side effects...but better to be conservative for now.
for (GenTreeCall::Use& use : call->LateArgs())
{
if (gtTreeHasSideEffects(use.GetNode(), flags))
{
return true;
}
}
// Otherwise:
return false;
}
// Otherwise the GT_CALL is considered to have side-effects.
return true;
}
}
if (flags & GTF_EXCEPT)
{
if (tree->OperMayThrow(this))
{
return true;
}
}
// Expressions declared as CSE by (e.g.) hoisting code are considered to have relevant side
// effects (if we care about GTF_MAKE_CSE).
if ((flags & GTF_MAKE_CSE) && (tree->gtFlags & GTF_MAKE_CSE))
{
return true;
}
return false;
}
/*****************************************************************************
* Returns true if the expr tree has any side effects.
*/
bool Compiler::gtTreeHasSideEffects(GenTree* tree, GenTreeFlags flags /* = GTF_SIDE_EFFECT*/)
{
// These are the side effect flags that we care about for this tree
GenTreeFlags sideEffectFlags = tree->gtFlags & flags;
// Does this tree have any Side-effect flags set that we care about?
if (sideEffectFlags == 0)
{
// no it doesn't..
return false;
}
if (sideEffectFlags == GTF_CALL)
{
if (tree->OperGet() == GT_CALL)
{
// Generally all trees that contain GT_CALL nodes are considered to have side-effects.
//
if (tree->AsCall()->gtCallType == CT_HELPER)
{
// If this node is a helper call we may not care about the side-effects.
// Note that gtNodeHasSideEffects checks the side effects of the helper itself
// as well as the side effects of its arguments.
return gtNodeHasSideEffects(tree, flags);
}
}
else if (tree->OperGet() == GT_INTRINSIC)
{
if (gtNodeHasSideEffects(tree, flags))
{
return true;
}
if (gtNodeHasSideEffects(tree->AsOp()->gtOp1, flags))
{
return true;
}
if ((tree->AsOp()->gtOp2 != nullptr) && gtNodeHasSideEffects(tree->AsOp()->gtOp2, flags))
{
return true;
}
return false;
}
}
return true;
}
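//------------------------------------------------------------------------
// gtBuildCommaList: prepend 'expr' to a (possibly null) comma-based side effect list.
//
// Notes (illustrative): calling this with e3, then e2, then e1 - i.e. in reverse execution
// order, as gtExtractSideEffList does - produces COMMA(e1, COMMA(e2, e3)), so the original
// execution order of the side effects is preserved.
//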
GenTree* Compiler::gtBuildCommaList(GenTree* list, GenTree* expr)
{
// 'list' starts off as null,
// and when it is null we haven't started the list yet.
//
if (list != nullptr)
{
// Create a GT_COMMA that appends 'expr' in front of the remaining set of expressions in (*list)
GenTree* result = gtNewOperNode(GT_COMMA, TYP_VOID, expr, list);
// Set the flags in the comma node
result->gtFlags |= (list->gtFlags & GTF_ALL_EFFECT);
result->gtFlags |= (expr->gtFlags & GTF_ALL_EFFECT);
DBEXEC(fgGlobalMorph, result->gtDebugFlags |= GTF_DEBUG_NODE_MORPHED);
// 'list' and 'expr' should have valuenumbers defined for both or for neither one (unless we are remorphing,
// in which case a prior transform involving either node may have discarded or otherwise invalidated the value
// numbers).
assert((list->gtVNPair.BothDefined() == expr->gtVNPair.BothDefined()) || !fgGlobalMorph);
// Set the ValueNumber 'gtVNPair' for the new GT_COMMA node
//
if (list->gtVNPair.BothDefined() && expr->gtVNPair.BothDefined())
{
// The result of a GT_COMMA node is op2, the normal value number is op2vnp
// But we also need to include the union of side effects from op1 and op2.
// we compute this value into exceptions_vnp.
ValueNumPair op1vnp;
ValueNumPair op1Xvnp = ValueNumStore::VNPForEmptyExcSet();
ValueNumPair op2vnp;
ValueNumPair op2Xvnp = ValueNumStore::VNPForEmptyExcSet();
vnStore->VNPUnpackExc(expr->gtVNPair, &op1vnp, &op1Xvnp);
vnStore->VNPUnpackExc(list->gtVNPair, &op2vnp, &op2Xvnp);
ValueNumPair exceptions_vnp = ValueNumStore::VNPForEmptyExcSet();
exceptions_vnp = vnStore->VNPExcSetUnion(exceptions_vnp, op1Xvnp);
exceptions_vnp = vnStore->VNPExcSetUnion(exceptions_vnp, op2Xvnp);
result->gtVNPair = vnStore->VNPWithExc(op2vnp, exceptions_vnp);
}
return result;
}
else
{
// The 'expr' will start the list of expressions
return expr;
}
}
//------------------------------------------------------------------------
// gtExtractSideEffList: Extracts side effects from the given expression.
//
// Arguments:
// expr - the expression tree to extract side effects from
// pList - pointer to a (possibly null) GT_COMMA list that
// will contain the extracted side effects
// flags - side effect flags to be considered
// ignoreRoot - ignore side effects on the expression root node
//
// Notes:
// Side effects are prepended to the GT_COMMA list such that op1 of
// each comma node holds the side effect tree and op2 points to the
// next comma node. The original side effect execution order is preserved.
//
void Compiler::gtExtractSideEffList(GenTree* expr,
GenTree** pList,
GenTreeFlags flags /* = GTF_SIDE_EFFECT*/,
bool ignoreRoot /* = false */)
{
class SideEffectExtractor final : public GenTreeVisitor<SideEffectExtractor>
{
public:
const GenTreeFlags m_flags;
ArrayStack<GenTree*> m_sideEffects;
enum
{
DoPreOrder = true,
UseExecutionOrder = true
};
SideEffectExtractor(Compiler* compiler, GenTreeFlags flags)
: GenTreeVisitor(compiler), m_flags(flags), m_sideEffects(compiler->getAllocator(CMK_SideEffects))
{
}
fgWalkResult PreOrderVisit(GenTree** use, GenTree* user)
{
GenTree* node = *use;
bool treeHasSideEffects = m_compiler->gtTreeHasSideEffects(node, m_flags);
if (treeHasSideEffects)
{
if (m_compiler->gtNodeHasSideEffects(node, m_flags))
{
PushSideEffects(node);
if (node->OperIsBlk() && !node->OperIsStoreBlk())
{
JITDUMP("Replace an unused OBJ/BLK node [%06d] with a NULLCHECK\n", dspTreeID(node));
m_compiler->gtChangeOperToNullCheck(node, m_compiler->compCurBB);
}
return Compiler::WALK_SKIP_SUBTREES;
}
// TODO-Cleanup: These have GTF_ASG set but for some reason gtNodeHasSideEffects ignores
// them. See the related gtNodeHasSideEffects comment as well.
// Also, these nodes must always be preserved, no matter what side effect flags are passed
// in. But then it should never be the case that gtExtractSideEffList gets called without
// specifying GTF_ASG so there doesn't seem to be any reason to be inconsistent with
// gtNodeHasSideEffects and make this check unconditionally.
if (node->OperIsAtomicOp())
{
PushSideEffects(node);
return Compiler::WALK_SKIP_SUBTREES;
}
if ((m_flags & GTF_EXCEPT) != 0)
{
// Special case - GT_ADDR of GT_IND nodes of TYP_STRUCT have to be kept together.
if (node->OperIs(GT_ADDR) && node->gtGetOp1()->OperIsIndir() &&
(node->gtGetOp1()->TypeGet() == TYP_STRUCT))
{
JITDUMP("Keep the GT_ADDR and GT_IND together:\n");
PushSideEffects(node);
return Compiler::WALK_SKIP_SUBTREES;
}
}
// Generally all GT_CALL nodes are considered to have side-effects.
// So if we get here it must be a helper call that we decided it does
// not have side effects that we needed to keep.
assert(!node->OperIs(GT_CALL) || (node->AsCall()->gtCallType == CT_HELPER));
}
if ((m_flags & GTF_IS_IN_CSE) != 0)
{
// If we're doing CSE then we also need to unmark CSE nodes. This will fail for CSE defs,
// those need to be extracted as if they're side effects.
if (!UnmarkCSE(node))
{
PushSideEffects(node);
return Compiler::WALK_SKIP_SUBTREES;
}
// The existence of CSE defs and uses is not propagated up the tree like side
// effects are. We need to continue visiting the tree as if it has side effects.
treeHasSideEffects = true;
}
return treeHasSideEffects ? Compiler::WALK_CONTINUE : Compiler::WALK_SKIP_SUBTREES;
}
private:
bool UnmarkCSE(GenTree* node)
{
assert(m_compiler->optValnumCSE_phase);
if (m_compiler->optUnmarkCSE(node))
{
// The call to optUnmarkCSE(node) should have cleared any CSE info.
assert(!IS_CSE_INDEX(node->gtCSEnum));
return true;
}
else
{
assert(IS_CSE_DEF(node->gtCSEnum));
#ifdef DEBUG
if (m_compiler->verbose)
{
printf("Preserving the CSE def #%02d at ", GET_CSE_INDEX(node->gtCSEnum));
m_compiler->printTreeID(node);
}
#endif
return false;
}
}
void PushSideEffects(GenTree* node)
{
// The extracted side effect will no longer be an argument, so unmark it.
// This is safe to do because the side effects will be visited in pre-order,
// aborting as soon as any tree is extracted. Thus if an argument for a call
// is being extracted, it is guaranteed that the call itself will not be.
node->gtFlags &= ~GTF_LATE_ARG;
m_sideEffects.Push(node);
}
};
SideEffectExtractor extractor(this, flags);
if (ignoreRoot)
{
for (GenTree* op : expr->Operands())
{
extractor.WalkTree(&op, nullptr);
}
}
else
{
extractor.WalkTree(&expr, nullptr);
}
GenTree* list = *pList;
// The extractor returns side effects in execution order but gtBuildCommaList prepends
// to the comma-based side effect list so we have to build the list in reverse order.
// This is also why the list cannot be built while traversing the tree.
// The number of side effects is usually small (<= 4), less than the ArrayStack's
// built-in size, so memory allocation is avoided.
while (!extractor.m_sideEffects.Empty())
{
list = gtBuildCommaList(list, extractor.m_sideEffects.Pop());
}
*pList = list;
}
/*****************************************************************************
*
* For debugging only - displays a tree node list and makes sure all the
* links are correctly set.
*/
#ifdef DEBUG
void dispNodeList(GenTree* list, bool verbose)
{
GenTree* last = nullptr;
GenTree* next;
if (!list)
{
return;
}
for (;;)
{
next = list->gtNext;
if (verbose)
{
printf("%08X -> %08X -> %08X\n", last, list, next);
}
assert(!last || last->gtNext == list);
assert(next == nullptr || next->gtPrev == list);
if (!next)
{
break;
}
last = list;
list = next;
}
printf(""); // null string means flush
}
#endif
/*****************************************************************************
* Callback to mark the nodes of a qmark-colon subtree that are conditionally
* executed.
*/
/* static */
Compiler::fgWalkResult Compiler::gtMarkColonCond(GenTree** pTree, fgWalkData* data)
{
assert(data->pCallbackData == nullptr);
(*pTree)->gtFlags |= GTF_COLON_COND;
return WALK_CONTINUE;
}
/*****************************************************************************
* Callback to clear the conditionally executed flags of nodes that no longer
 *  will be conditionally executed. Note that when we find another colon we must
 *  stop, as the nodes below this one WILL be conditionally executed. This callback
 *  is called when folding a qmark condition (i.e. the condition is constant).
*/
/* static */
Compiler::fgWalkResult Compiler::gtClearColonCond(GenTree** pTree, fgWalkData* data)
{
GenTree* tree = *pTree;
assert(data->pCallbackData == nullptr);
if (tree->OperGet() == GT_COLON)
{
// Nodes below this will be conditionally executed.
return WALK_SKIP_SUBTREES;
}
tree->gtFlags &= ~GTF_COLON_COND;
return WALK_CONTINUE;
}
/*****************************************************************************
*
* Callback used by the tree walker to implement fgFindLink()
*/
static Compiler::fgWalkResult gtFindLinkCB(GenTree** pTree, Compiler::fgWalkData* cbData)
{
Compiler::FindLinkData* data = (Compiler::FindLinkData*)cbData->pCallbackData;
if (*pTree == data->nodeToFind)
{
data->result = pTree;
data->parent = cbData->parent;
return Compiler::WALK_ABORT;
}
return Compiler::WALK_CONTINUE;
}
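//------------------------------------------------------------------------
// gtFindLink: walk the statement's tree looking for 'node'; returns the use edge and the
// user node if found, or an empty result otherwise.
//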
Compiler::FindLinkData Compiler::gtFindLink(Statement* stmt, GenTree* node)
{
FindLinkData data = {node, nullptr, nullptr};
fgWalkResult result = fgWalkTreePre(stmt->GetRootNodePointer(), gtFindLinkCB, &data);
if (result == WALK_ABORT)
{
assert(data.nodeToFind == *data.result);
return data;
}
else
{
return {node, nullptr, nullptr};
}
}
/*****************************************************************************
*
* Callback that checks if a tree node has oper type GT_CATCH_ARG
*/
static Compiler::fgWalkResult gtFindCatchArg(GenTree** pTree, Compiler::fgWalkData* /* data */)
{
return ((*pTree)->OperGet() == GT_CATCH_ARG) ? Compiler::WALK_ABORT : Compiler::WALK_CONTINUE;
}
/*****************************************************************************/
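// gtHasCatchArg: return true if 'tree' contains a GT_CATCH_ARG node. The GTF_ORDER_SIDEEFF
// check below is a cheap pre-filter, since GT_CATCH_ARG nodes are flagged with GTF_ORDER_SIDEEFF.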
bool Compiler::gtHasCatchArg(GenTree* tree)
{
if (((tree->gtFlags & GTF_ORDER_SIDEEFF) != 0) && (fgWalkTreePre(&tree, gtFindCatchArg) == WALK_ABORT))
{
return true;
}
return false;
}
//------------------------------------------------------------------------
// gtHasCallOnStack:
//
// Arguments:
// parentStack: a context (stack of parent nodes)
//
// Return Value:
// returns true if any of the parent nodes are a GT_CALL
//
// Assumptions:
// We have a stack of parent nodes. This generally requires that
// we are performing a recursive tree walk using struct fgWalkData
//
//------------------------------------------------------------------------
/* static */ bool Compiler::gtHasCallOnStack(GenTreeStack* parentStack)
{
for (int i = 0; i < parentStack->Height(); i++)
{
GenTree* node = parentStack->Top(i);
if (node->OperGet() == GT_CALL)
{
return true;
}
}
return false;
}
//------------------------------------------------------------------------
// gtGetTypeProducerKind: determine if a tree produces a runtime type, and
// if so, how.
//
// Arguments:
// tree - tree to examine
//
// Return Value:
// TypeProducerKind for the tree.
//
// Notes:
// Checks to see if this tree returns a RuntimeType value, and if so,
// how that value is determined.
//
// Currently handles these cases
// 1) The result of Object::GetType
// 2) The result of typeof(...)
// 3) A null reference
// 4) Tree is otherwise known to have type RuntimeType
//
// The null reference case is surprisingly common because operator
// overloading turns the otherwise innocuous
//
// Type t = ....;
// if (t == null)
//
// into a method call.
Compiler::TypeProducerKind Compiler::gtGetTypeProducerKind(GenTree* tree)
{
if (tree->gtOper == GT_CALL)
{
if (tree->AsCall()->gtCallType == CT_HELPER)
{
if (gtIsTypeHandleToRuntimeTypeHelper(tree->AsCall()))
{
return TPK_Handle;
}
}
else if (tree->AsCall()->gtCallMoreFlags & GTF_CALL_M_SPECIAL_INTRINSIC)
{
if (lookupNamedIntrinsic(tree->AsCall()->gtCallMethHnd) == NI_System_Object_GetType)
{
return TPK_GetType;
}
}
}
else if ((tree->gtOper == GT_INTRINSIC) && (tree->AsIntrinsic()->gtIntrinsicName == NI_System_Object_GetType))
{
return TPK_GetType;
}
else if ((tree->gtOper == GT_CNS_INT) && (tree->AsIntCon()->gtIconVal == 0))
{
return TPK_Null;
}
else
{
bool isExact = false;
bool isNonNull = false;
CORINFO_CLASS_HANDLE clsHnd = gtGetClassHandle(tree, &isExact, &isNonNull);
if (clsHnd != NO_CLASS_HANDLE && clsHnd == info.compCompHnd->getBuiltinClass(CLASSID_RUNTIME_TYPE))
{
return TPK_Other;
}
}
return TPK_Unknown;
}
//------------------------------------------------------------------------
// gtIsTypeHandleToRuntimeTypeHelper -- see if a helper call is constructing
// a RuntimeType from a handle
//
// Arguments:
//    call - the helper call to examine
//
// Return Value:
// True if so
bool Compiler::gtIsTypeHandleToRuntimeTypeHelper(GenTreeCall* call)
{
return call->gtCallMethHnd == eeFindHelper(CORINFO_HELP_TYPEHANDLE_TO_RUNTIMETYPE) ||
call->gtCallMethHnd == eeFindHelper(CORINFO_HELP_TYPEHANDLE_TO_RUNTIMETYPE_MAYBENULL);
}
//------------------------------------------------------------------------
// gtIsTypeHandleToRuntimeTypeHandleHelper -- see if a helper call is constructing
// a RuntimeTypeHandle from a handle
//
// Arguments:
//    call - the helper call to examine
// pHelper - optional pointer to a variable that receives the type of the helper
//
// Return Value:
// True if so
bool Compiler::gtIsTypeHandleToRuntimeTypeHandleHelper(GenTreeCall* call, CorInfoHelpFunc* pHelper)
{
CorInfoHelpFunc helper = CORINFO_HELP_UNDEF;
if (call->gtCallMethHnd == eeFindHelper(CORINFO_HELP_TYPEHANDLE_TO_RUNTIMETYPEHANDLE))
{
helper = CORINFO_HELP_TYPEHANDLE_TO_RUNTIMETYPEHANDLE;
}
else if (call->gtCallMethHnd == eeFindHelper(CORINFO_HELP_TYPEHANDLE_TO_RUNTIMETYPEHANDLE_MAYBENULL))
{
helper = CORINFO_HELP_TYPEHANDLE_TO_RUNTIMETYPEHANDLE_MAYBENULL;
}
if (pHelper != nullptr)
{
*pHelper = helper;
}
return helper != CORINFO_HELP_UNDEF;
}
bool Compiler::gtIsActiveCSE_Candidate(GenTree* tree)
{
return (optValnumCSE_phase && IS_CSE_INDEX(tree->gtCSEnum));
}
/*****************************************************************************/
struct ComplexityStruct
{
unsigned m_numNodes;
unsigned m_nodeLimit;
ComplexityStruct(unsigned nodeLimit) : m_numNodes(0), m_nodeLimit(nodeLimit)
{
}
};
static Compiler::fgWalkResult ComplexityExceedsWalker(GenTree** pTree, Compiler::fgWalkData* data)
{
ComplexityStruct* pComplexity = (ComplexityStruct*)data->pCallbackData;
if (++pComplexity->m_numNodes > pComplexity->m_nodeLimit)
{
return Compiler::WALK_ABORT;
}
else
{
return Compiler::WALK_CONTINUE;
}
}
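//------------------------------------------------------------------------
// gtComplexityExceeds: walk '*tree' and return true as soon as more than 'limit' nodes
// have been visited; the walk aborts early, so large trees are not fully traversed.
//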
bool Compiler::gtComplexityExceeds(GenTree** tree, unsigned limit)
{
ComplexityStruct complexity(limit);
if (fgWalkTreePre(tree, &ComplexityExceedsWalker, &complexity) == WALK_ABORT)
{
return true;
}
else
{
return false;
}
}
bool GenTree::IsPhiNode()
{
return (OperGet() == GT_PHI_ARG) || (OperGet() == GT_PHI) || IsPhiDefn();
}
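// IsPhiDefn: true for a PHI definition, i.e. (illustrative shapes) ASG(LCL_VAR, PHI(...))
// in HIR or STORE_LCL_VAR(PHI(...)) in LIR.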
bool GenTree::IsPhiDefn()
{
bool res = ((OperGet() == GT_ASG) && (AsOp()->gtOp2 != nullptr) && (AsOp()->gtOp2->OperGet() == GT_PHI)) ||
((OperGet() == GT_STORE_LCL_VAR) && (AsOp()->gtOp1 != nullptr) && (AsOp()->gtOp1->OperGet() == GT_PHI));
assert(!res || OperGet() == GT_STORE_LCL_VAR || AsOp()->gtOp1->OperGet() == GT_LCL_VAR);
return res;
}
// IsPartialLclFld: Check for a GT_LCL_FLD whose type is a different size than the lclVar.
//
// Arguments:
// comp - the Compiler object.
//
// Return Value:
// Returns "true" iff 'this' is a GT_LCL_FLD or GT_STORE_LCL_FLD on which the type
// is not the same size as the type of the GT_LCL_VAR
bool GenTree::IsPartialLclFld(Compiler* comp)
{
return ((gtOper == GT_LCL_FLD) &&
(comp->lvaTable[this->AsLclVarCommon()->GetLclNum()].lvExactSize != genTypeSize(gtType)));
}
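//------------------------------------------------------------------------
// DefinesLocal: check whether this node defines a local, either directly or through an
// indirection or block store of the local's address.
//
// Notes: handles ASG to a local, ASG through IND/BLK of a local address, and standalone
// block store nodes; '*pIsEntire' (when requested) reports whether the whole local is overwritten.
//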
bool GenTree::DefinesLocal(Compiler* comp, GenTreeLclVarCommon** pLclVarTree, bool* pIsEntire)
{
GenTreeBlk* blkNode = nullptr;
if (OperIs(GT_ASG))
{
if (AsOp()->gtOp1->IsLocal())
{
GenTreeLclVarCommon* lclVarTree = AsOp()->gtOp1->AsLclVarCommon();
*pLclVarTree = lclVarTree;
if (pIsEntire != nullptr)
{
if (lclVarTree->IsPartialLclFld(comp))
{
*pIsEntire = false;
}
else
{
*pIsEntire = true;
}
}
return true;
}
else if (AsOp()->gtOp1->OperGet() == GT_IND)
{
GenTree* indArg = AsOp()->gtOp1->AsOp()->gtOp1;
return indArg->DefinesLocalAddr(comp, genTypeSize(AsOp()->gtOp1->TypeGet()), pLclVarTree, pIsEntire);
}
else if (AsOp()->gtOp1->OperIsBlk())
{
blkNode = AsOp()->gtOp1->AsBlk();
}
}
else if (OperIsBlk())
{
blkNode = this->AsBlk();
}
if (blkNode != nullptr)
{
GenTree* destAddr = blkNode->Addr();
unsigned width = blkNode->Size();
// Do we care about whether this assigns the entire variable?
if (pIsEntire != nullptr && blkNode->OperIs(GT_STORE_DYN_BLK))
{
GenTree* blockWidth = blkNode->AsStoreDynBlk()->gtDynamicSize;
if (blockWidth->IsCnsIntOrI())
{
assert(blockWidth->AsIntConCommon()->FitsInI32());
width = static_cast<unsigned>(blockWidth->AsIntConCommon()->IconValue());
if (width == 0)
{
return false;
}
}
}
return destAddr->DefinesLocalAddr(comp, width, pLclVarTree, pIsEntire);
}
// Otherwise...
return false;
}
// Returns true if this GenTree defines a result which is based on the address of a local.
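// Recognized shapes (descriptive): ADDR(LCL_VAR/LCL_FLD), LCL_VAR_ADDR/LCL_FLD_ADDR,
// ADD of one of these with a constant, and post-rationalization LEA/IND-based forms.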
bool GenTree::DefinesLocalAddr(Compiler* comp, unsigned width, GenTreeLclVarCommon** pLclVarTree, bool* pIsEntire)
{
if (OperGet() == GT_ADDR || OperGet() == GT_LCL_VAR_ADDR)
{
GenTree* addrArg = this;
if (OperGet() == GT_ADDR)
{
addrArg = AsOp()->gtOp1;
}
if (addrArg->IsLocal() || addrArg->OperIsLocalAddr())
{
GenTreeLclVarCommon* addrArgLcl = addrArg->AsLclVarCommon();
*pLclVarTree = addrArgLcl;
if (pIsEntire != nullptr)
{
unsigned lclOffset = addrArgLcl->GetLclOffs();
if (lclOffset != 0)
{
// We aren't updating the bytes at [0..lclOffset-1] so *pIsEntire should be set to false
*pIsEntire = false;
}
else
{
unsigned lclNum = addrArgLcl->GetLclNum();
unsigned varWidth = comp->lvaLclExactSize(lclNum);
if (comp->lvaTable[lclNum].lvNormalizeOnStore())
{
// It's normalize on store, so use the full storage width -- writing to low bytes won't
// necessarily yield a normalized value.
varWidth = genTypeStSz(var_types(comp->lvaTable[lclNum].lvType)) * sizeof(int);
}
*pIsEntire = (varWidth == width);
}
}
return true;
}
else if (addrArg->OperGet() == GT_IND)
{
            // The GT_ADDR and the GT_IND cancel each other out and can both be optimized away;
            // recurse using the child of the GT_IND.
return addrArg->AsOp()->gtOp1->DefinesLocalAddr(comp, width, pLclVarTree, pIsEntire);
}
}
else if (OperGet() == GT_ADD)
{
if (AsOp()->gtOp1->IsCnsIntOrI())
{
            // If we are just adding a zero then we allow an IsEntire match against width,
            // otherwise we change width to zero to disallow an IsEntire match.
return AsOp()->gtOp2->DefinesLocalAddr(comp, AsOp()->gtOp1->IsIntegralConst(0) ? width : 0, pLclVarTree,
pIsEntire);
}
else if (AsOp()->gtOp2->IsCnsIntOrI())
{
            // If we are just adding a zero then we allow an IsEntire match against width,
            // otherwise we change width to zero to disallow an IsEntire match.
return AsOp()->gtOp1->DefinesLocalAddr(comp, AsOp()->gtOp2->IsIntegralConst(0) ? width : 0, pLclVarTree,
pIsEntire);
}
}
// Post rationalization we could have GT_IND(GT_LEA(..)) trees.
else if (OperGet() == GT_LEA)
{
// This method gets invoked during liveness computation and therefore it is critical
// that we don't miss 'use' of any local. The below logic is making the assumption
// that in case of LEA(base, index, offset) - only base can be a GT_LCL_VAR_ADDR
// and index is not.
CLANG_FORMAT_COMMENT_ANCHOR;
#ifdef DEBUG
GenTree* index = AsOp()->gtOp2;
if (index != nullptr)
{
assert(!index->DefinesLocalAddr(comp, width, pLclVarTree, pIsEntire));
}
#endif // DEBUG
// base
GenTree* base = AsOp()->gtOp1;
if (base != nullptr)
{
// Lea could have an Indir as its base.
if (base->OperGet() == GT_IND)
{
base = base->AsOp()->gtOp1->gtEffectiveVal(/*commas only*/ true);
}
return base->DefinesLocalAddr(comp, width, pLclVarTree, pIsEntire);
}
}
// Otherwise...
return false;
}
//------------------------------------------------------------------------
// IsLocalExpr: Determine if this is a LclVarCommon node and return some
// additional info about it in the two out parameters.
//
// Arguments:
// comp - The Compiler instance
// pLclVarTree - An "out" argument that returns the local tree as a
// LclVarCommon, if it is indeed local.
// pFldSeq - An "out" argument that returns the value numbering field
// sequence for the node, if any.
//
// Return Value:
// Returns true, and sets the out arguments accordingly, if this is
// a LclVarCommon node.
bool GenTree::IsLocalExpr(Compiler* comp, GenTreeLclVarCommon** pLclVarTree, FieldSeqNode** pFldSeq)
{
if (IsLocal()) // Note that this covers "GT_LCL_FLD."
{
*pLclVarTree = AsLclVarCommon();
if (OperGet() == GT_LCL_FLD)
{
// Otherwise, prepend this field to whatever we've already accumulated outside in.
*pFldSeq = comp->GetFieldSeqStore()->Append(AsLclFld()->GetFieldSeq(), *pFldSeq);
}
return true;
}
else
{
return false;
}
}
// If this tree evaluates some sum of a local address and some constants,
// return the node for the local being addressed
GenTreeLclVarCommon* GenTree::IsLocalAddrExpr()
{
if (OperGet() == GT_ADDR)
{
return AsOp()->gtOp1->IsLocal() ? AsOp()->gtOp1->AsLclVarCommon() : nullptr;
}
else if (OperIsLocalAddr())
{
return this->AsLclVarCommon();
}
else if (OperGet() == GT_ADD)
{
if (AsOp()->gtOp1->OperGet() == GT_CNS_INT)
{
return AsOp()->gtOp2->IsLocalAddrExpr();
}
else if (AsOp()->gtOp2->OperGet() == GT_CNS_INT)
{
return AsOp()->gtOp1->IsLocalAddrExpr();
}
}
// Otherwise...
return nullptr;
}
//------------------------------------------------------------------------
// IsLocalAddrExpr: finds if "this" is an address of a local var/fld.
//
// Arguments:
// comp - a compiler instance;
// pLclVarTree - [out] sets to the node indicating the local variable if found;
// pFldSeq - [out] sets to the field sequence representing the field, else null;
// pOffset - [out](optional) sets to the sum offset of the lcl/fld if found,
// note it does not include pLclVarTree->GetLclOffs().
//
// Returns:
// Returns true if "this" represents the address of a local, or a field of a local.
//
// Notes:
// It is mostly used for optimizations but assertion propagation depends on it for correctness.
// So if this function does not recognize a def of a LCL_VAR we can have an incorrect optimization.
//
bool GenTree::IsLocalAddrExpr(Compiler* comp,
GenTreeLclVarCommon** pLclVarTree,
FieldSeqNode** pFldSeq,
ssize_t* pOffset /* = nullptr */)
{
if (OperGet() == GT_ADDR)
{
assert(!comp->compRationalIRForm);
GenTree* addrArg = AsOp()->gtOp1;
if (addrArg->IsLocal()) // Note that this covers "GT_LCL_FLD."
{
*pLclVarTree = addrArg->AsLclVarCommon();
if (addrArg->OperGet() == GT_LCL_FLD)
{
// Otherwise, prepend this field to whatever we've already accumulated outside in.
*pFldSeq = comp->GetFieldSeqStore()->Append(addrArg->AsLclFld()->GetFieldSeq(), *pFldSeq);
}
return true;
}
else
{
return false;
}
}
else if (OperIsLocalAddr())
{
*pLclVarTree = this->AsLclVarCommon();
if (this->OperGet() == GT_LCL_FLD_ADDR)
{
*pFldSeq = comp->GetFieldSeqStore()->Append(this->AsLclFld()->GetFieldSeq(), *pFldSeq);
}
return true;
}
else if (OperGet() == GT_ADD)
{
if (AsOp()->gtOp1->OperGet() == GT_CNS_INT)
{
GenTreeIntCon* cnst = AsOp()->gtOp1->AsIntCon();
if (cnst->gtFieldSeq == nullptr)
{
return false;
}
// Otherwise, prepend this field to whatever we've already accumulated outside in.
*pFldSeq = comp->GetFieldSeqStore()->Append(cnst->gtFieldSeq, *pFldSeq);
if (pOffset != nullptr)
{
*pOffset += cnst->IconValue();
}
return AsOp()->gtOp2->IsLocalAddrExpr(comp, pLclVarTree, pFldSeq, pOffset);
}
else if (AsOp()->gtOp2->OperGet() == GT_CNS_INT)
{
GenTreeIntCon* cnst = AsOp()->gtOp2->AsIntCon();
if (cnst->gtFieldSeq == nullptr)
{
return false;
}
// Otherwise, prepend this field to whatever we've already accumulated outside in.
*pFldSeq = comp->GetFieldSeqStore()->Append(cnst->gtFieldSeq, *pFldSeq);
if (pOffset != nullptr)
{
*pOffset += cnst->IconValue();
}
return AsOp()->gtOp1->IsLocalAddrExpr(comp, pLclVarTree, pFldSeq, pOffset);
}
}
// Otherwise...
return false;
}
//------------------------------------------------------------------------
// IsImplicitByrefParameterValue: determine if this tree is the entire
// value of a local implicit byref parameter
//
// Arguments:
// compiler -- compiler instance
//
// Return Value:
// GenTreeLclVar node for the local, or nullptr.
//
GenTreeLclVar* GenTree::IsImplicitByrefParameterValue(Compiler* compiler)
{
#if defined(TARGET_AMD64) || defined(TARGET_ARM64)
GenTreeLclVar* lcl = nullptr;
if (OperIs(GT_LCL_VAR))
{
lcl = AsLclVar();
}
else if (OperIs(GT_OBJ))
{
GenTree* addr = AsIndir()->Addr();
if (addr->OperIs(GT_LCL_VAR))
{
lcl = addr->AsLclVar();
}
else if (addr->OperIs(GT_ADDR))
{
GenTree* base = addr->AsOp()->gtOp1;
if (base->OperIs(GT_LCL_VAR))
{
lcl = base->AsLclVar();
}
}
}
if ((lcl != nullptr) && compiler->lvaIsImplicitByRefLocal(lcl->GetLclNum()))
{
return lcl;
}
#endif // defined(TARGET_AMD64) || defined(TARGET_ARM64)
return nullptr;
}
//------------------------------------------------------------------------
// IsLclVarUpdateTree: Determine whether this is an assignment tree of the
// form Vn = Vn 'oper' 'otherTree' where Vn is a lclVar
//
// Arguments:
// pOtherTree - An "out" argument in which 'otherTree' will be returned.
// pOper - An "out" argument in which 'oper' will be returned.
//
// Return Value:
// If the tree is of the above form, the lclNum of the variable being
// updated is returned, and 'pOtherTree' and 'pOper' are set.
// Otherwise, returns BAD_VAR_NUM.
//
// Notes:
// 'otherTree' can have any shape.
// We avoid worrying about whether the op is commutative by only considering the
// first operand of the rhs. It is expected that most trees of this form will
// already have the lclVar on the lhs.
// TODO-CQ: Evaluate whether there are missed opportunities due to this, or
// whether gtSetEvalOrder will already have put the lclVar on the lhs in
// the cases of interest.
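//
// Example (illustrative sketch; the local number is hypothetical):
//    For a tree of the shape ASG(LCL_VAR V03, ADD(LCL_VAR V03, CNS_INT 1)), this method
//    returns 3, sets *pOtherTree to the CNS_INT node, and sets *pOper to GT_ADD.
//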
unsigned GenTree::IsLclVarUpdateTree(GenTree** pOtherTree, genTreeOps* pOper)
{
unsigned lclNum = BAD_VAR_NUM;
if (OperIs(GT_ASG))
{
GenTree* lhs = AsOp()->gtOp1;
GenTree* rhs = AsOp()->gtOp2;
if ((lhs->OperGet() == GT_LCL_VAR) && rhs->OperIsBinary())
{
unsigned lhsLclNum = lhs->AsLclVarCommon()->GetLclNum();
GenTree* rhsOp1 = rhs->AsOp()->gtOp1;
GenTree* rhsOp2 = rhs->AsOp()->gtOp2;
// Some operators, such as HWINTRINSIC, are currently declared as binary but
// may not have two operands. We must check that both operands actually exist.
if ((rhsOp1 != nullptr) && (rhsOp2 != nullptr) && (rhsOp1->OperGet() == GT_LCL_VAR) &&
(rhsOp1->AsLclVarCommon()->GetLclNum() == lhsLclNum))
{
lclNum = lhsLclNum;
*pOtherTree = rhsOp2;
*pOper = rhs->OperGet();
}
}
}
return lclNum;
}
#ifdef DEBUG
//------------------------------------------------------------------------
// canBeContained: check whether this tree node may be a subcomponent of its parent for purposes
// of code generation.
//
// Return Value:
// True if it is possible to contain this node and false otherwise.
//
bool GenTree::canBeContained() const
{
assert(OperIsLIR());
if (IsMultiRegLclVar())
{
return false;
}
if (gtHasReg(nullptr))
{
return false;
}
// It is not possible for nodes that do not produce values or that are not containable values to be contained.
if (!IsValue() || ((DebugOperKind() & DBK_NOCONTAIN) != 0) || (OperIsHWIntrinsic() && !isContainableHWIntrinsic()))
{
return false;
}
return true;
}
#endif // DEBUG
//------------------------------------------------------------------------
// isContained: check whether this tree node is a subcomponent of its parent for codegen purposes
//
// Return Value:
// Returns true if there is no code generated explicitly for this node.
// Essentially, it will be rolled into the code generation for the parent.
//
// Assumptions:
// This method relies upon the value of the GTF_CONTAINED flag.
// Therefore this method is only valid after Lowering.
// Also note that register allocation or other subsequent phases may cause
// nodes to become contained (or not) and therefore this property may change.
//
bool GenTree::isContained() const
{
assert(OperIsLIR());
const bool isMarkedContained = ((gtFlags & GTF_CONTAINED) != 0);
#ifdef DEBUG
if (!canBeContained())
{
assert(!isMarkedContained);
}
// these actually produce a register (the flags reg, we just don't model it)
// and are a separate instruction from the branch that consumes the result.
// They can only produce a result if the child is a SIMD equality comparison.
else if (OperIsCompare())
{
assert(isMarkedContained == false);
}
// if it's contained it can't be unused.
if (isMarkedContained)
{
assert(!IsUnusedValue());
}
#endif // DEBUG
return isMarkedContained;
}
// return true if node is contained and an indir
bool GenTree::isContainedIndir() const
{
return OperIsIndir() && isContained();
}
bool GenTree::isIndirAddrMode()
{
return OperIsIndir() && AsIndir()->Addr()->OperIsAddrMode() && AsIndir()->Addr()->isContained();
}
bool GenTree::isIndir() const
{
return OperGet() == GT_IND || OperGet() == GT_STOREIND;
}
bool GenTreeIndir::HasBase()
{
return Base() != nullptr;
}
bool GenTreeIndir::HasIndex()
{
return Index() != nullptr;
}
GenTree* GenTreeIndir::Base()
{
GenTree* addr = Addr();
if (isIndirAddrMode())
{
GenTree* result = addr->AsAddrMode()->Base();
if (result != nullptr)
{
result = result->gtEffectiveVal();
}
return result;
}
else
{
return addr; // TODO: why do we return 'addr' here, but we return 'nullptr' in the equivalent Index() case?
}
}
GenTree* GenTreeIndir::Index()
{
if (isIndirAddrMode())
{
GenTree* result = Addr()->AsAddrMode()->Index();
if (result != nullptr)
{
result = result->gtEffectiveVal();
}
return result;
}
else
{
return nullptr;
}
}
unsigned GenTreeIndir::Scale()
{
if (HasIndex())
{
return Addr()->AsAddrMode()->gtScale;
}
else
{
return 1;
}
}
ssize_t GenTreeIndir::Offset()
{
if (isIndirAddrMode())
{
return Addr()->AsAddrMode()->Offset();
}
else if (Addr()->gtOper == GT_CLS_VAR_ADDR)
{
return static_cast<ssize_t>(reinterpret_cast<intptr_t>(Addr()->AsClsVar()->gtClsVarHnd));
}
else if (Addr()->IsCnsIntOrI() && Addr()->isContained())
{
return Addr()->AsIntConCommon()->IconValue();
}
else
{
return 0;
}
}
//------------------------------------------------------------------------
// GenTreeIntConCommon::ImmedValNeedsReloc: does this immediate value need a relocation recorded with the VM?
//
// Arguments:
// comp - Compiler instance
//
// Return Value:
// True if this immediate value requires us to record a relocation for it; false otherwise.
bool GenTreeIntConCommon::ImmedValNeedsReloc(Compiler* comp)
{
return comp->opts.compReloc && (gtOper == GT_CNS_INT) && IsIconHandle();
}
//------------------------------------------------------------------------
// ImmedValCanBeFolded: can this immediate value be folded for op?
//
// Arguments:
// comp - Compiler instance
// op - Tree operator
//
// Return Value:
// True if this immediate value can be folded for op; false otherwise.
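//
// Example (illustrative sketch):
//    A class handle constant compared with GT_EQ/GT_NE, e.g. as produced for
//    typeof(T) == typeof(int), may be folded even though it would otherwise
//    require a relocation.
//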
bool GenTreeIntConCommon::ImmedValCanBeFolded(Compiler* comp, genTreeOps op)
{
// In general, immediate values that need relocations can't be folded.
// There are cases where we do want to allow folding of handle comparisons
// (e.g., typeof(T) == typeof(int)).
return !ImmedValNeedsReloc(comp) || (op == GT_EQ) || (op == GT_NE);
}
#ifdef TARGET_AMD64
// Returns true if this absolute address fits within the base of an addr mode.
// On Amd64 this effectively means whether an absolute indirect address can
// be encoded as a 32-bit offset relative to RIP or zero.
bool GenTreeIntConCommon::FitsInAddrBase(Compiler* comp)
{
#ifdef DEBUG
// Early out if PC-rel encoding of absolute addr is disabled.
if (!comp->opts.compEnablePCRelAddr)
{
return false;
}
#endif
if (comp->opts.compReloc)
{
// During NGen the JIT is always asked to generate relocatable code.
// Hence the JIT will try to encode only icon handles as pc-relative offsets.
return IsIconHandle() && (IMAGE_REL_BASED_REL32 == comp->eeGetRelocTypeHint((void*)IconValue()));
}
else
{
// During jitting, we are allowed to generate non-relocatable code.
// On Amd64 we can encode an absolute indirect addr as an offset relative to zero or RIP.
// An absolute indir addr that fits within 32 bits can be encoded as an offset relative
// to zero. All other absolute indir addrs are attempted to be encoded as RIP-relative,
// based on the reloc hint provided by the VM. RIP-relative encoding is preferred over
// relative-to-zero, because the former is one byte smaller than the latter. For this
// reason we check for the reloc hint first and then whether the addr fits in 32 bits.
//
// The VM starts off with an initial state that allows both data and code addresses to be
// encoded as pc-relative offsets. Hence the JIT will attempt to encode all absolute addresses
// as pc-relative offsets. It is possible that, while jitting a method, an address cannot be
// encoded as a pc-relative offset. In that case the VM will note the overflow and will trigger
// re-jitting of the method with reloc hints turned off for all future methods. The second time
// around, jitting will succeed since the JIT will not attempt to encode data addresses as
// pc-relative offsets. Note that the JIT will always attempt to relocate code addresses
// (e.g. call addresses). After an overflow, the VM will assume any relocation recorded is for
// a code address and will emit a jump thunk if it cannot be encoded as a pc-relative offset.
return (IMAGE_REL_BASED_REL32 == comp->eeGetRelocTypeHint((void*)IconValue())) || FitsInI32();
}
}
// Returns true if this icon value, when encoded as an address, needs a relocation recorded with the VM
bool GenTreeIntConCommon::AddrNeedsReloc(Compiler* comp)
{
if (comp->opts.compReloc)
{
// During NGen the JIT is always asked to generate relocatable code.
// Hence the JIT will try to encode only icon handles as pc-relative offsets.
return IsIconHandle() && (IMAGE_REL_BASED_REL32 == comp->eeGetRelocTypeHint((void*)IconValue()));
}
else
{
return IMAGE_REL_BASED_REL32 == comp->eeGetRelocTypeHint((void*)IconValue());
}
}
#elif defined(TARGET_X86)
// Returns true if this absolute address fits within the base of an addr mode.
// On x86 all addresses are 4-bytes and can be directly encoded in an addr mode.
bool GenTreeIntConCommon::FitsInAddrBase(Compiler* comp)
{
#ifdef DEBUG
// Early out if PC-rel encoding of absolute addr is disabled.
if (!comp->opts.compEnablePCRelAddr)
{
return false;
}
#endif
return IsCnsIntOrI();
}
// Returns true if this icon value, when encoded as an address, needs a relocation recorded with the VM
bool GenTreeIntConCommon::AddrNeedsReloc(Compiler* comp)
{
// If generating relocatable code, icons should be reported for recording relocations.
return comp->opts.compReloc && IsIconHandle();
}
#endif // TARGET_X86
//------------------------------------------------------------------------
// IsFieldAddr: Is "this" a static or class field address?
//
// Recognizes the following patterns:
// this: ADD(baseAddr, CONST [FldSeq])
// this: ADD(CONST [FldSeq], baseAddr)
// this: CONST [FldSeq]
// this: Zero [FldSeq]
//
// Arguments:
// comp - the Compiler object
// pBaseAddr - [out] parameter for "the base address"
// pFldSeq - [out] parameter for the field sequence
//
// Return Value:
// If "this" matches patterns denoted above, and the FldSeq found is "full",
// i. e. starts with a class field or a static field, and includes all the
// struct fields that this tree represents the address of, this method will
// return "true" and set either "pBaseAddr" to some value, which must be used
// by the caller as the key into the "first field map" to obtain the actual
// value for the field. For instance fields, "base address" will be the object
// reference, for statics - the address to which the field offset with the
// field sequence is added, see "impImportStaticFieldAccess" and "fgMorphField".
//
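// Example (illustrative sketch; V00 and the field "C.f" are hypothetical):
//    For a tree of the shape ADD(LCL_VAR V00 (TYP_REF), CNS_INT 16 [FldSeq "C.f"]),
//    this method returns true with *pBaseAddr set to the LCL_VAR node and *pFldSeq
//    set to the field sequence starting at the class field "C.f".
//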
bool GenTree::IsFieldAddr(Compiler* comp, GenTree** pBaseAddr, FieldSeqNode** pFldSeq)
{
assert(TypeIs(TYP_I_IMPL, TYP_BYREF, TYP_REF));
*pBaseAddr = nullptr;
*pFldSeq = FieldSeqStore::NotAField();
GenTree* baseAddr = nullptr;
FieldSeqNode* fldSeq = FieldSeqStore::NotAField();
if (OperIs(GT_ADD))
{
// If one operand has a field sequence, the other operand must not have one
// as the order of fields in that case would not be well-defined.
if (AsOp()->gtOp1->IsCnsIntOrI() && AsOp()->gtOp1->IsIconHandle())
{
assert(!AsOp()->gtOp2->IsCnsIntOrI() || !AsOp()->gtOp2->IsIconHandle());
fldSeq = AsOp()->gtOp1->AsIntCon()->gtFieldSeq;
baseAddr = AsOp()->gtOp2;
}
else if (AsOp()->gtOp2->IsCnsIntOrI())
{
assert(!AsOp()->gtOp1->IsCnsIntOrI() || !AsOp()->gtOp1->IsIconHandle());
fldSeq = AsOp()->gtOp2->AsIntCon()->gtFieldSeq;
baseAddr = AsOp()->gtOp1;
}
if (baseAddr != nullptr)
{
assert(!baseAddr->TypeIs(TYP_REF) || !comp->GetZeroOffsetFieldMap()->Lookup(baseAddr));
}
}
else if (IsCnsIntOrI() && IsIconHandle(GTF_ICON_STATIC_HDL))
{
assert(!comp->GetZeroOffsetFieldMap()->Lookup(this) && (AsIntCon()->gtFieldSeq != nullptr));
fldSeq = AsIntCon()->gtFieldSeq;
baseAddr = nullptr;
}
else if (comp->GetZeroOffsetFieldMap()->Lookup(this, &fldSeq))
{
baseAddr = this;
}
else
{
return false;
}
assert(fldSeq != nullptr);
if ((fldSeq == FieldSeqStore::NotAField()) || fldSeq->IsPseudoField())
{
return false;
}
// The above screens out obviously invalid cases, but we have more checks to perform. The
// sequence returned from this method *must* start with either a class (NOT struct) field
// or a static field. To avoid the expense of calling "getFieldClass" here, we will instead
// rely on the invariant that TYP_REF base addresses can never appear for struct fields - we
// will effectively treat such cases ("possible" in unsafe code) as undefined behavior.
if (comp->eeIsFieldStatic(fldSeq->GetFieldHandle()))
{
// TODO-VNTypes: this code is out of sync w.r.t. boxed statics that are numbered with
// VNF_PtrToStatic and treated as "simple" while here we treat them as "complex".
// TODO-VNTypes: we will always return the "baseAddr" here for now, but strictly speaking,
// we only need to do that if we have a shared field, to encode the logical "instantiation"
// argument. In all other cases, this serves no purpose and just leads to redundant maps.
*pBaseAddr = baseAddr;
*pFldSeq = fldSeq;
return true;
}
if (baseAddr->TypeIs(TYP_REF))
{
assert(!comp->eeIsValueClass(comp->info.compCompHnd->getFieldClass(fldSeq->GetFieldHandle())));
*pBaseAddr = baseAddr;
*pFldSeq = fldSeq;
return true;
}
// This case is reached, for example, if we have a chain of struct fields that are based on
// some pointer. We do not model such cases because we do not model maps for ByrefExposed
// memory, as it does not have the non-aliasing property of GcHeap and reference types.
return false;
}
bool Compiler::gtIsStaticFieldPtrToBoxedStruct(var_types fieldNodeType, CORINFO_FIELD_HANDLE fldHnd)
{
if (fieldNodeType != TYP_REF)
{
return false;
}
noway_assert(fldHnd != nullptr);
CorInfoType cit = info.compCompHnd->getFieldType(fldHnd);
var_types fieldTyp = JITtype2varType(cit);
return fieldTyp != TYP_REF;
}
#ifdef FEATURE_SIMD
//------------------------------------------------------------------------
// gtGetSIMDZero: Get a zero value of the appropriate SIMD type.
//
// Arguments:
//    simdType - The SIMD type
// simdBaseJitType - The SIMD base JIT type we need
// simdHandle - The handle for the SIMD type
//
// Return Value:
// A node generating the appropriate Zero, if we are able to discern it,
// otherwise null (note that this shouldn't happen, but callers should
// be tolerant of this case).
GenTree* Compiler::gtGetSIMDZero(var_types simdType, CorInfoType simdBaseJitType, CORINFO_CLASS_HANDLE simdHandle)
{
bool found = false;
bool isHWSIMD = true;
noway_assert(m_simdHandleCache != nullptr);
// First, determine whether this is Vector<T>.
if (simdType == getSIMDVectorType())
{
switch (simdBaseJitType)
{
case CORINFO_TYPE_FLOAT:
found = (simdHandle == m_simdHandleCache->SIMDFloatHandle);
break;
case CORINFO_TYPE_DOUBLE:
found = (simdHandle == m_simdHandleCache->SIMDDoubleHandle);
break;
case CORINFO_TYPE_INT:
found = (simdHandle == m_simdHandleCache->SIMDIntHandle);
break;
case CORINFO_TYPE_USHORT:
found = (simdHandle == m_simdHandleCache->SIMDUShortHandle);
break;
case CORINFO_TYPE_UBYTE:
found = (simdHandle == m_simdHandleCache->SIMDUByteHandle);
break;
case CORINFO_TYPE_SHORT:
found = (simdHandle == m_simdHandleCache->SIMDShortHandle);
break;
case CORINFO_TYPE_BYTE:
found = (simdHandle == m_simdHandleCache->SIMDByteHandle);
break;
case CORINFO_TYPE_LONG:
found = (simdHandle == m_simdHandleCache->SIMDLongHandle);
break;
case CORINFO_TYPE_UINT:
found = (simdHandle == m_simdHandleCache->SIMDUIntHandle);
break;
case CORINFO_TYPE_ULONG:
found = (simdHandle == m_simdHandleCache->SIMDULongHandle);
break;
case CORINFO_TYPE_NATIVEINT:
found = (simdHandle == m_simdHandleCache->SIMDNIntHandle);
break;
case CORINFO_TYPE_NATIVEUINT:
found = (simdHandle == m_simdHandleCache->SIMDNUIntHandle);
break;
default:
break;
}
if (found)
{
isHWSIMD = false;
}
}
if (!found)
{
// We must still have isHWSIMD set to true, and the only non-HW types left are the fixed types.
switch (simdType)
{
case TYP_SIMD8:
switch (simdBaseJitType)
{
case CORINFO_TYPE_FLOAT:
if (simdHandle == m_simdHandleCache->SIMDVector2Handle)
{
isHWSIMD = false;
}
#if defined(TARGET_ARM64) && defined(FEATURE_HW_INTRINSICS)
else
{
assert(simdHandle == m_simdHandleCache->Vector64FloatHandle);
}
break;
case CORINFO_TYPE_INT:
assert(simdHandle == m_simdHandleCache->Vector64IntHandle);
break;
case CORINFO_TYPE_USHORT:
assert(simdHandle == m_simdHandleCache->Vector64UShortHandle);
break;
case CORINFO_TYPE_UBYTE:
assert(simdHandle == m_simdHandleCache->Vector64UByteHandle);
break;
case CORINFO_TYPE_SHORT:
assert(simdHandle == m_simdHandleCache->Vector64ShortHandle);
break;
case CORINFO_TYPE_BYTE:
assert(simdHandle == m_simdHandleCache->Vector64ByteHandle);
break;
case CORINFO_TYPE_UINT:
assert(simdHandle == m_simdHandleCache->Vector64UIntHandle);
#endif // defined(TARGET_ARM64) && defined(FEATURE_HW_INTRINSICS)
break;
default:
break;
}
break;
case TYP_SIMD12:
assert((simdBaseJitType == CORINFO_TYPE_FLOAT) && (simdHandle == m_simdHandleCache->SIMDVector3Handle));
isHWSIMD = false;
break;
case TYP_SIMD16:
switch (simdBaseJitType)
{
case CORINFO_TYPE_FLOAT:
if (simdHandle == m_simdHandleCache->SIMDVector4Handle)
{
isHWSIMD = false;
}
#if defined(FEATURE_HW_INTRINSICS)
else
{
assert(simdHandle == m_simdHandleCache->Vector128FloatHandle);
}
break;
case CORINFO_TYPE_DOUBLE:
assert(simdHandle == m_simdHandleCache->Vector128DoubleHandle);
break;
case CORINFO_TYPE_INT:
assert(simdHandle == m_simdHandleCache->Vector128IntHandle);
break;
case CORINFO_TYPE_USHORT:
assert(simdHandle == m_simdHandleCache->Vector128UShortHandle);
break;
case CORINFO_TYPE_UBYTE:
assert(simdHandle == m_simdHandleCache->Vector128UByteHandle);
break;
case CORINFO_TYPE_SHORT:
assert(simdHandle == m_simdHandleCache->Vector128ShortHandle);
break;
case CORINFO_TYPE_BYTE:
assert(simdHandle == m_simdHandleCache->Vector128ByteHandle);
break;
case CORINFO_TYPE_LONG:
assert(simdHandle == m_simdHandleCache->Vector128LongHandle);
break;
case CORINFO_TYPE_UINT:
assert(simdHandle == m_simdHandleCache->Vector128UIntHandle);
break;
case CORINFO_TYPE_ULONG:
assert(simdHandle == m_simdHandleCache->Vector128ULongHandle);
break;
case CORINFO_TYPE_NATIVEINT:
assert(simdHandle == m_simdHandleCache->Vector128NIntHandle);
break;
case CORINFO_TYPE_NATIVEUINT:
assert(simdHandle == m_simdHandleCache->Vector128NUIntHandle);
break;
#endif // defined(FEATURE_HW_INTRINSICS)
default:
break;
}
break;
#if defined(TARGET_XARCH) && defined(FEATURE_HW_INTRINSICS)
case TYP_SIMD32:
switch (simdBaseJitType)
{
case CORINFO_TYPE_FLOAT:
assert(simdHandle == m_simdHandleCache->Vector256FloatHandle);
break;
case CORINFO_TYPE_DOUBLE:
assert(simdHandle == m_simdHandleCache->Vector256DoubleHandle);
break;
case CORINFO_TYPE_INT:
assert(simdHandle == m_simdHandleCache->Vector256IntHandle);
break;
case CORINFO_TYPE_USHORT:
assert(simdHandle == m_simdHandleCache->Vector256UShortHandle);
break;
case CORINFO_TYPE_UBYTE:
assert(simdHandle == m_simdHandleCache->Vector256UByteHandle);
break;
case CORINFO_TYPE_SHORT:
assert(simdHandle == m_simdHandleCache->Vector256ShortHandle);
break;
case CORINFO_TYPE_BYTE:
assert(simdHandle == m_simdHandleCache->Vector256ByteHandle);
break;
case CORINFO_TYPE_LONG:
assert(simdHandle == m_simdHandleCache->Vector256LongHandle);
break;
case CORINFO_TYPE_UINT:
assert(simdHandle == m_simdHandleCache->Vector256UIntHandle);
break;
case CORINFO_TYPE_ULONG:
assert(simdHandle == m_simdHandleCache->Vector256ULongHandle);
break;
case CORINFO_TYPE_NATIVEINT:
assert(simdHandle == m_simdHandleCache->Vector256NIntHandle);
break;
case CORINFO_TYPE_NATIVEUINT:
assert(simdHandle == m_simdHandleCache->Vector256NUIntHandle);
break;
default:
break;
}
break;
#endif // TARGET_XARCH && FEATURE_HW_INTRINSICS
default:
break;
}
}
unsigned size = genTypeSize(simdType);
if (isHWSIMD)
{
#if defined(FEATURE_HW_INTRINSICS)
return gtNewSimdZeroNode(simdType, simdBaseJitType, size, /* isSimdAsHWIntrinsic */ false);
#else
JITDUMP("Coudn't find the matching SIMD type for %s<%s> in gtGetSIMDZero\n", varTypeName(simdType),
varTypeName(JitType2PreciseVarType(simdBaseJitType)));
return nullptr;
#endif // FEATURE_HW_INTRINSICS
}
else
{
return gtNewSIMDVectorZero(simdType, simdBaseJitType, size);
}
}
#endif // FEATURE_SIMD
CORINFO_CLASS_HANDLE Compiler::gtGetStructHandleIfPresent(GenTree* tree)
{
CORINFO_CLASS_HANDLE structHnd = NO_CLASS_HANDLE;
tree = tree->gtEffectiveVal();
if (varTypeIsStruct(tree->gtType))
{
switch (tree->gtOper)
{
default:
break;
case GT_MKREFANY:
structHnd = impGetRefAnyClass();
break;
case GT_OBJ:
structHnd = tree->AsObj()->GetLayout()->GetClassHandle();
break;
case GT_BLK:
structHnd = tree->AsBlk()->GetLayout()->GetClassHandle();
break;
case GT_CALL:
structHnd = tree->AsCall()->gtRetClsHnd;
break;
case GT_RET_EXPR:
structHnd = tree->AsRetExpr()->gtRetClsHnd;
break;
case GT_ARGPLACE:
structHnd = tree->AsArgPlace()->gtArgPlaceClsHnd;
break;
case GT_INDEX:
structHnd = tree->AsIndex()->gtStructElemClass;
break;
case GT_FIELD:
info.compCompHnd->getFieldType(tree->AsField()->gtFldHnd, &structHnd);
break;
case GT_ASG:
structHnd = gtGetStructHandleIfPresent(tree->gtGetOp1());
break;
case GT_LCL_FLD:
#ifdef FEATURE_SIMD
if (varTypeIsSIMD(tree))
{
structHnd = gtGetStructHandleForSIMD(tree->gtType, CORINFO_TYPE_FLOAT);
#ifdef FEATURE_HW_INTRINSICS
if (structHnd == NO_CLASS_HANDLE)
{
structHnd = gtGetStructHandleForHWSIMD(tree->gtType, CORINFO_TYPE_FLOAT);
}
#endif
}
#endif
break;
case GT_LCL_VAR:
{
unsigned lclNum = tree->AsLclVarCommon()->GetLclNum();
structHnd = lvaGetStruct(lclNum);
break;
}
case GT_RETURN:
structHnd = gtGetStructHandleIfPresent(tree->AsOp()->gtOp1);
break;
case GT_IND:
#ifdef FEATURE_SIMD
if (varTypeIsSIMD(tree))
{
structHnd = gtGetStructHandleForSIMD(tree->gtType, CORINFO_TYPE_FLOAT);
#ifdef FEATURE_HW_INTRINSICS
if (structHnd == NO_CLASS_HANDLE)
{
structHnd = gtGetStructHandleForHWSIMD(tree->gtType, CORINFO_TYPE_FLOAT);
}
#endif
}
else
#endif
{
// Attempt to find a handle for this expression.
// We can do this for an array element indirection, or for a field indirection.
ArrayInfo arrInfo;
if (TryGetArrayInfo(tree->AsIndir(), &arrInfo))
{
structHnd = arrInfo.m_elemStructType;
}
else
{
GenTree* addr = tree->AsIndir()->Addr();
FieldSeqNode* fieldSeq = nullptr;
if ((addr->OperGet() == GT_ADD) && addr->gtGetOp2()->OperIs(GT_CNS_INT))
{
fieldSeq = addr->gtGetOp2()->AsIntCon()->gtFieldSeq;
}
else
{
GetZeroOffsetFieldMap()->Lookup(addr, &fieldSeq);
}
if (fieldSeq != nullptr)
{
while (fieldSeq->m_next != nullptr)
{
fieldSeq = fieldSeq->m_next;
}
if (fieldSeq != FieldSeqStore::NotAField() && !fieldSeq->IsPseudoField())
{
CORINFO_FIELD_HANDLE fieldHnd = fieldSeq->m_fieldHnd;
CorInfoType fieldCorType = info.compCompHnd->getFieldType(fieldHnd, &structHnd);
// With unsafe code and type casts
// this can return a primitive type and have nullptr for structHnd
// see runtime/issues/38541
}
}
}
}
break;
#ifdef FEATURE_SIMD
case GT_SIMD:
structHnd = gtGetStructHandleForSIMD(tree->gtType, tree->AsSIMD()->GetSimdBaseJitType());
break;
#endif // FEATURE_SIMD
#ifdef FEATURE_HW_INTRINSICS
case GT_HWINTRINSIC:
if ((tree->gtFlags & GTF_SIMDASHW_OP) != 0)
{
structHnd = gtGetStructHandleForSIMD(tree->gtType, tree->AsHWIntrinsic()->GetSimdBaseJitType());
}
else
{
structHnd = gtGetStructHandleForHWSIMD(tree->gtType, tree->AsHWIntrinsic()->GetSimdBaseJitType());
}
break;
#endif
break;
}
// TODO-1stClassStructs: add a check that `structHnd != NO_CLASS_HANDLE`;
// nowadays it won't work because the right-hand side of an ASG could have a struct type without a handle
// (check `fgMorphBlockOperand(isBlkReqd)` and a few other cases).
}
return structHnd;
}
CORINFO_CLASS_HANDLE Compiler::gtGetStructHandle(GenTree* tree)
{
CORINFO_CLASS_HANDLE structHnd = gtGetStructHandleIfPresent(tree);
assert(structHnd != NO_CLASS_HANDLE);
return structHnd;
}
//------------------------------------------------------------------------
// gtGetClassHandle: find class handle for a ref type
//
// Arguments:
// tree -- tree to find handle for
// pIsExact [out] -- whether handle is exact type
// pIsNonNull [out] -- whether tree value is known not to be null
//
// Return Value:
// nullptr if class handle is unknown,
// otherwise the class handle.
// *pIsExact set true if tree type is known to be exactly the handle type,
// otherwise actual type may be a subtype.
// *pIsNonNull set true if tree value is known not to be null,
// otherwise a null value is possible.
CORINFO_CLASS_HANDLE Compiler::gtGetClassHandle(GenTree* tree, bool* pIsExact, bool* pIsNonNull)
{
// Set default values for our out params.
*pIsNonNull = false;
*pIsExact = false;
CORINFO_CLASS_HANDLE objClass = nullptr;
// Bail out if we're just importing and not generating code, since
// the jit uses TYP_REF for CORINFO_TYPE_VAR locals and args, but
// these may not be ref types.
if (compIsForImportOnly())
{
return objClass;
}
// Bail out if the tree is not a ref type.
var_types treeType = tree->TypeGet();
if (treeType != TYP_REF)
{
return objClass;
}
// Tunnel through commas.
GenTree* obj = tree->gtEffectiveVal(false);
const genTreeOps objOp = obj->OperGet();
switch (objOp)
{
case GT_COMMA:
{
// gtEffectiveVal above means we shouldn't see commas here.
assert(!"unexpected GT_COMMA");
break;
}
case GT_LCL_VAR:
{
// For locals, pick up type info from the local table.
const unsigned objLcl = obj->AsLclVar()->GetLclNum();
objClass = lvaTable[objLcl].lvClassHnd;
*pIsExact = lvaTable[objLcl].lvClassIsExact;
break;
}
case GT_FIELD:
{
// For fields, get the type from the field handle.
CORINFO_FIELD_HANDLE fieldHnd = obj->AsField()->gtFldHnd;
if (fieldHnd != nullptr)
{
objClass = gtGetFieldClassHandle(fieldHnd, pIsExact, pIsNonNull);
}
break;
}
case GT_RET_EXPR:
{
// If we see a RET_EXPR, recurse through to examine the
// return value expression.
GenTree* retExpr = tree->AsRetExpr()->gtInlineCandidate;
objClass = gtGetClassHandle(retExpr, pIsExact, pIsNonNull);
break;
}
case GT_CALL:
{
GenTreeCall* call = tree->AsCall();
if (call->gtCallMoreFlags & GTF_CALL_M_SPECIAL_INTRINSIC)
{
NamedIntrinsic ni = lookupNamedIntrinsic(call->gtCallMethHnd);
if ((ni == NI_System_Array_Clone) || (ni == NI_System_Object_MemberwiseClone))
{
objClass = gtGetClassHandle(call->gtCallThisArg->GetNode(), pIsExact, pIsNonNull);
break;
}
CORINFO_CLASS_HANDLE specialObjClass = impGetSpecialIntrinsicExactReturnType(call->gtCallMethHnd);
if (specialObjClass != nullptr)
{
objClass = specialObjClass;
*pIsExact = true;
*pIsNonNull = true;
break;
}
}
if (call->IsInlineCandidate())
{
// For inline candidates, we've already cached the return
// type class handle in the inline info.
InlineCandidateInfo* inlInfo = call->gtInlineCandidateInfo;
assert(inlInfo != nullptr);
// Grab it as our first cut at a return type.
assert(inlInfo->methInfo.args.retType == CORINFO_TYPE_CLASS);
objClass = inlInfo->methInfo.args.retTypeClass;
// If the method is shared, the above may not capture
// the most precise return type information (that is,
// it may represent a shared return type and as such,
// have instances of __Canon). See if we can use the
// context to get at something more definite.
//
// For now, we do this here on demand rather than when
// processing the call, but we could/should apply
// similar sharpening to the argument and local types
// of the inlinee.
const unsigned retClassFlags = info.compCompHnd->getClassAttribs(objClass);
if (retClassFlags & CORINFO_FLG_SHAREDINST)
{
CORINFO_CONTEXT_HANDLE context = inlInfo->exactContextHnd;
if (context != nullptr)
{
CORINFO_CLASS_HANDLE exactClass = eeGetClassFromContext(context);
// Grab the signature in this context.
CORINFO_SIG_INFO sig;
eeGetMethodSig(call->gtCallMethHnd, &sig, exactClass);
assert(sig.retType == CORINFO_TYPE_CLASS);
objClass = sig.retTypeClass;
}
}
}
else if (call->gtCallType == CT_USER_FUNC)
{
// For user calls, we can fetch the approximate return
// type info from the method handle. Unfortunately
// we've lost the exact context, so this is the best
// we can do for now.
CORINFO_METHOD_HANDLE method = call->gtCallMethHnd;
CORINFO_CLASS_HANDLE exactClass = nullptr;
CORINFO_SIG_INFO sig;
eeGetMethodSig(method, &sig, exactClass);
if (sig.retType == CORINFO_TYPE_VOID)
{
// This is a constructor call.
const unsigned methodFlags = info.compCompHnd->getMethodAttribs(method);
assert((methodFlags & CORINFO_FLG_CONSTRUCTOR) != 0);
objClass = info.compCompHnd->getMethodClass(method);
*pIsExact = true;
*pIsNonNull = true;
}
else
{
assert(sig.retType == CORINFO_TYPE_CLASS);
objClass = sig.retTypeClass;
}
}
else if (call->gtCallType == CT_HELPER)
{
objClass = gtGetHelperCallClassHandle(call, pIsExact, pIsNonNull);
}
break;
}
case GT_INTRINSIC:
{
GenTreeIntrinsic* intrinsic = obj->AsIntrinsic();
if (intrinsic->gtIntrinsicName == NI_System_Object_GetType)
{
CORINFO_CLASS_HANDLE runtimeType = info.compCompHnd->getBuiltinClass(CLASSID_RUNTIME_TYPE);
assert(runtimeType != NO_CLASS_HANDLE);
objClass = runtimeType;
*pIsExact = false;
*pIsNonNull = true;
}
break;
}
case GT_CNS_STR:
{
// For literal strings, we know the class and that the
// value is not null.
objClass = impGetStringClass();
*pIsExact = true;
*pIsNonNull = true;
break;
}
case GT_IND:
{
GenTreeIndir* indir = obj->AsIndir();
if (indir->HasBase() && !indir->HasIndex())
{
// indir(addr(lcl)) --> lcl
//
// This comes up during constrained callvirt on ref types.
GenTree* base = indir->Base();
GenTreeLclVarCommon* lcl = base->IsLocalAddrExpr();
if ((lcl != nullptr) && (base->OperGet() != GT_ADD))
{
const unsigned objLcl = lcl->GetLclNum();
objClass = lvaTable[objLcl].lvClassHnd;
*pIsExact = lvaTable[objLcl].lvClassIsExact;
}
else if (base->OperGet() == GT_ARR_ELEM)
{
// indir(arr_elem(...)) -> array element type
GenTree* array = base->AsArrElem()->gtArrObj;
objClass = gtGetArrayElementClassHandle(array);
*pIsExact = false;
*pIsNonNull = false;
}
else if (base->OperGet() == GT_ADD)
{
// This could be a static field access.
//
// See if op1 is a static field base helper call
// and if so, op2 will have the field info.
GenTree* op1 = base->AsOp()->gtOp1;
GenTree* op2 = base->AsOp()->gtOp2;
const bool op1IsStaticFieldBase = gtIsStaticGCBaseHelperCall(op1);
if (op1IsStaticFieldBase && (op2->OperGet() == GT_CNS_INT))
{
FieldSeqNode* fieldSeq = op2->AsIntCon()->gtFieldSeq;
if (fieldSeq != nullptr)
{
while (fieldSeq->m_next != nullptr)
{
fieldSeq = fieldSeq->m_next;
}
assert(!fieldSeq->IsPseudoField());
// No benefit to calling gtGetFieldClassHandle here, as
// the exact field being accessed can vary.
CORINFO_FIELD_HANDLE fieldHnd = fieldSeq->m_fieldHnd;
CORINFO_CLASS_HANDLE fieldClass = nullptr;
CorInfoType fieldCorType = info.compCompHnd->getFieldType(fieldHnd, &fieldClass);
assert(fieldCorType == CORINFO_TYPE_CLASS);
objClass = fieldClass;
}
}
}
}
break;
}
case GT_BOX:
{
// Box should just wrap a local var reference which has
// the type we're looking for. Also box only represents a
// non-nullable value type so result cannot be null.
GenTreeBox* box = obj->AsBox();
GenTree* boxTemp = box->BoxOp();
assert(boxTemp->IsLocal());
const unsigned boxTempLcl = boxTemp->AsLclVar()->GetLclNum();
objClass = lvaTable[boxTempLcl].lvClassHnd;
*pIsExact = lvaTable[boxTempLcl].lvClassIsExact;
*pIsNonNull = true;
break;
}
case GT_INDEX:
{
GenTree* array = obj->AsIndex()->Arr();
objClass = gtGetArrayElementClassHandle(array);
*pIsExact = false;
*pIsNonNull = false;
break;
}
default:
{
break;
}
}
return objClass;
}
//------------------------------------------------------------------------
// gtGetHelperCallClassHandle: find class handle for return value of a
// helper call
//
// Arguments:
// call - helper call to examine
// pIsExact - [OUT] true if type is known exactly
// pIsNonNull - [OUT] true if return value is not null
//
// Return Value:
// nullptr if helper call result is not a ref class, or the class handle
// is unknown, otherwise the class handle.
CORINFO_CLASS_HANDLE Compiler::gtGetHelperCallClassHandle(GenTreeCall* call, bool* pIsExact, bool* pIsNonNull)
{
assert(call->gtCallType == CT_HELPER);
*pIsNonNull = false;
*pIsExact = false;
CORINFO_CLASS_HANDLE objClass = nullptr;
const CorInfoHelpFunc helper = eeGetHelperNum(call->gtCallMethHnd);
switch (helper)
{
case CORINFO_HELP_TYPEHANDLE_TO_RUNTIMETYPE:
case CORINFO_HELP_TYPEHANDLE_TO_RUNTIMETYPE_MAYBENULL:
{
// Note for some runtimes these helpers return exact types.
//
// But in those cases the types are also sealed, so there's no
// need to claim exactness here.
const bool helperResultNonNull = (helper == CORINFO_HELP_TYPEHANDLE_TO_RUNTIMETYPE);
CORINFO_CLASS_HANDLE runtimeType = info.compCompHnd->getBuiltinClass(CLASSID_RUNTIME_TYPE);
assert(runtimeType != NO_CLASS_HANDLE);
objClass = runtimeType;
*pIsNonNull = helperResultNonNull;
break;
}
case CORINFO_HELP_CHKCASTCLASS:
case CORINFO_HELP_CHKCASTANY:
case CORINFO_HELP_CHKCASTARRAY:
case CORINFO_HELP_CHKCASTINTERFACE:
case CORINFO_HELP_CHKCASTCLASS_SPECIAL:
case CORINFO_HELP_ISINSTANCEOFINTERFACE:
case CORINFO_HELP_ISINSTANCEOFARRAY:
case CORINFO_HELP_ISINSTANCEOFCLASS:
case CORINFO_HELP_ISINSTANCEOFANY:
{
// Fetch the class handle from the helper call arglist
GenTreeCall::Use* args = call->gtCallArgs;
GenTree* typeArg = args->GetNode();
CORINFO_CLASS_HANDLE castHnd = gtGetHelperArgClassHandle(typeArg);
// We generally assume the type being cast to is the best type
// for the result, unless it is an interface type.
//
// TODO-CQ: when we have default interface methods then
// this might not be the best assumption. We could also
// explore calling something like mergeClasses to identify
// the more specific class. A similar issue arises when
// typing the temp in impCastClassOrIsInstToTree, when we
// expand the cast inline.
if (castHnd != nullptr)
{
DWORD attrs = info.compCompHnd->getClassAttribs(castHnd);
if ((attrs & CORINFO_FLG_INTERFACE) != 0)
{
castHnd = nullptr;
}
}
// If we don't have a good estimate for the type we can use the
// type from the value being cast instead.
if (castHnd == nullptr)
{
GenTree* valueArg = args->GetNext()->GetNode();
castHnd = gtGetClassHandle(valueArg, pIsExact, pIsNonNull);
}
// We don't know at jit time if the cast will succeed or fail, but if it
// fails at runtime then an exception is thrown for cast helpers, or the
// result is set null for instance helpers.
//
// So it safe to claim the result has the cast type.
// Note we don't know for sure that it is exactly this type.
if (castHnd != nullptr)
{
objClass = castHnd;
}
break;
}
case CORINFO_HELP_NEWARR_1_DIRECT:
case CORINFO_HELP_NEWARR_1_OBJ:
case CORINFO_HELP_NEWARR_1_VC:
case CORINFO_HELP_NEWARR_1_ALIGN8:
case CORINFO_HELP_READYTORUN_NEWARR_1:
{
CORINFO_CLASS_HANDLE arrayHnd = (CORINFO_CLASS_HANDLE)call->compileTimeHelperArgumentHandle;
if (arrayHnd != NO_CLASS_HANDLE)
{
objClass = arrayHnd;
*pIsExact = true;
*pIsNonNull = true;
}
break;
}
default:
break;
}
return objClass;
}
//------------------------------------------------------------------------
// gtGetArrayElementClassHandle: find class handle for elements of an array
// of ref types
//
// Arguments:
// array -- array to find handle for
//
// Return Value:
// nullptr if element class handle is unknown, otherwise the class handle.
CORINFO_CLASS_HANDLE Compiler::gtGetArrayElementClassHandle(GenTree* array)
{
bool isArrayExact = false;
bool isArrayNonNull = false;
CORINFO_CLASS_HANDLE arrayClassHnd = gtGetClassHandle(array, &isArrayExact, &isArrayNonNull);
if (arrayClassHnd != nullptr)
{
// We know the class of the reference
DWORD attribs = info.compCompHnd->getClassAttribs(arrayClassHnd);
if ((attribs & CORINFO_FLG_ARRAY) != 0)
{
// We know for sure it is an array
CORINFO_CLASS_HANDLE elemClassHnd = nullptr;
CorInfoType arrayElemType = info.compCompHnd->getChildType(arrayClassHnd, &elemClassHnd);
if (arrayElemType == CORINFO_TYPE_CLASS)
{
// We know it is an array of ref types
return elemClassHnd;
}
}
}
return nullptr;
}
//------------------------------------------------------------------------
// gtGetFieldClassHandle: find class handle for a field
//
// Arguments:
// fieldHnd - field handle for field in question
// pIsExact - [OUT] true if type is known exactly
// pIsNonNull - [OUT] true if field value is not null
//
// Return Value:
// nullptr if helper call result is not a ref class, or the class handle
// is unknown, otherwise the class handle.
//
// May examine runtime state of static field instances.
CORINFO_CLASS_HANDLE Compiler::gtGetFieldClassHandle(CORINFO_FIELD_HANDLE fieldHnd, bool* pIsExact, bool* pIsNonNull)
{
CORINFO_CLASS_HANDLE fieldClass = nullptr;
CorInfoType fieldCorType = info.compCompHnd->getFieldType(fieldHnd, &fieldClass);
if (fieldCorType == CORINFO_TYPE_CLASS)
{
// Optionally, look at the actual type of the field's value
bool queryForCurrentClass = true;
INDEBUG(queryForCurrentClass = (JitConfig.JitQueryCurrentStaticFieldClass() > 0););
if (queryForCurrentClass)
{
#if DEBUG
const char* fieldClassName = nullptr;
const char* fieldName = eeGetFieldName(fieldHnd, &fieldClassName);
JITDUMP("Querying runtime about current class of field %s.%s (declared as %s)\n", fieldClassName, fieldName,
eeGetClassName(fieldClass));
#endif // DEBUG
// Is this a fully initialized init-only static field?
//
// Note we're not asking for speculative results here, yet.
CORINFO_CLASS_HANDLE currentClass = info.compCompHnd->getStaticFieldCurrentClass(fieldHnd);
if (currentClass != NO_CLASS_HANDLE)
{
// Yes! We know the class exactly and can rely on this to always be true.
fieldClass = currentClass;
*pIsExact = true;
*pIsNonNull = true;
JITDUMP("Runtime reports field is init-only and initialized and has class %s\n",
eeGetClassName(fieldClass));
}
else
{
JITDUMP("Field's current class not available\n");
}
}
}
return fieldClass;
}
//------------------------------------------------------------------------
// gtIsStaticGCBaseHelperCall: true if tree is fetching the gc static base
// for a subsequent static field access
//
// Arguments:
// tree - tree to consider
//
// Return Value:
// true if the tree is a suitable helper call
//
// Notes:
// Excludes R2R helpers as they specify the target field in a way
// that is opaque to the jit.
bool Compiler::gtIsStaticGCBaseHelperCall(GenTree* tree)
{
if (tree->OperGet() != GT_CALL)
{
return false;
}
GenTreeCall* call = tree->AsCall();
if (call->gtCallType != CT_HELPER)
{
return false;
}
const CorInfoHelpFunc helper = eeGetHelperNum(call->gtCallMethHnd);
switch (helper)
{
// We are looking for a REF type so only need to check for the GC base helpers
case CORINFO_HELP_GETGENERICS_GCSTATIC_BASE:
case CORINFO_HELP_GETSHARED_GCSTATIC_BASE:
case CORINFO_HELP_GETSHARED_GCSTATIC_BASE_NOCTOR:
case CORINFO_HELP_GETSHARED_GCSTATIC_BASE_DYNAMICCLASS:
case CORINFO_HELP_GETGENERICS_GCTHREADSTATIC_BASE:
case CORINFO_HELP_GETSHARED_GCTHREADSTATIC_BASE:
case CORINFO_HELP_GETSHARED_GCTHREADSTATIC_BASE_NOCTOR:
case CORINFO_HELP_GETSHARED_GCTHREADSTATIC_BASE_DYNAMICCLASS:
return true;
default:
break;
}
return false;
}
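//------------------------------------------------------------------------
// ParseArrayAddress: decompose an array element address expression.
//
// Example (illustrative sketch):
//    An address of the general shape
//       arrRef + elemOffset + index * elemSize [+ trailing struct field offsets]
//    is decomposed into the array reference (*pArr), a value number for the index
//    (*pInxVN), and the trailing field sequence (*pFldSeq).
//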
void GenTree::ParseArrayAddress(
Compiler* comp, ArrayInfo* arrayInfo, GenTree** pArr, ValueNum* pInxVN, FieldSeqNode** pFldSeq)
{
*pArr = nullptr;
ValueNum inxVN = ValueNumStore::NoVN;
target_ssize_t offset = 0;
FieldSeqNode* fldSeq = nullptr;
ParseArrayAddressWork(comp, 1, pArr, &inxVN, &offset, &fldSeq);
// If we didn't find an array reference (perhaps it is the constant null?) we will give up.
if (*pArr == nullptr)
{
return;
}
// OK, now we have to figure out if any part of the "offset" is a constant contribution to the index.
// First, sum the offsets of any fields in fldSeq.
unsigned fieldOffsets = 0;
FieldSeqNode* fldSeqIter = fldSeq;
// Also, find the first non-pseudo field...
assert(*pFldSeq == nullptr);
while (fldSeqIter != nullptr)
{
if (fldSeqIter == FieldSeqStore::NotAField())
{
// TODO-Review: A NotAField here indicates a failure to properly maintain the field sequence
// See test case self_host_tests_x86\jit\regression\CLR-x86-JIT\v1-m12-beta2\b70992\b70992.exe
// Safest thing to do here is to drop back to MinOpts
CLANG_FORMAT_COMMENT_ANCHOR;
#ifdef DEBUG
if (comp->opts.optRepeat)
{
// We don't guarantee preserving these annotations through the entire optimizer, so
// just conservatively return null if under optRepeat.
*pArr = nullptr;
return;
}
#endif // DEBUG
noway_assert(!"fldSeqIter is NotAField() in ParseArrayAddress");
}
if (!FieldSeqStore::IsPseudoField(fldSeqIter->m_fieldHnd))
{
if (*pFldSeq == nullptr)
{
*pFldSeq = fldSeqIter;
}
CORINFO_CLASS_HANDLE fldCls = nullptr;
noway_assert(fldSeqIter->m_fieldHnd != nullptr);
CorInfoType cit = comp->info.compCompHnd->getFieldType(fldSeqIter->m_fieldHnd, &fldCls);
fieldOffsets += comp->compGetTypeSize(cit, fldCls);
}
fldSeqIter = fldSeqIter->m_next;
}
// Is there some portion of the "offset" beyond the first-elem offset and the struct field suffix we just computed?
if (!FitsIn<target_ssize_t>(fieldOffsets + arrayInfo->m_elemOffset) ||
!FitsIn<target_ssize_t>(arrayInfo->m_elemSize))
{
// This seems unlikely, but no harm in being safe...
*pInxVN = comp->GetValueNumStore()->VNForExpr(nullptr, TYP_INT);
return;
}
// Otherwise...
target_ssize_t offsetAccountedFor = static_cast<target_ssize_t>(fieldOffsets + arrayInfo->m_elemOffset);
target_ssize_t elemSize = static_cast<target_ssize_t>(arrayInfo->m_elemSize);
target_ssize_t constIndOffset = offset - offsetAccountedFor;
// This should be divisible by the element size...
assert((constIndOffset % elemSize) == 0);
target_ssize_t constInd = constIndOffset / elemSize;
ValueNumStore* vnStore = comp->GetValueNumStore();
if (inxVN == ValueNumStore::NoVN)
{
// Must be a constant index.
*pInxVN = vnStore->VNForPtrSizeIntCon(constInd);
}
else
{
//
// Perform ((inxVN / elemSizeVN) + vnForConstInd)
//
// The value associated with the index value number (inxVN) is the offset into the array,
// which has been scaled by element size. We need to recover the array index from that offset
if (vnStore->IsVNConstant(inxVN))
{
target_ssize_t index = vnStore->CoercedConstantValue<target_ssize_t>(inxVN);
noway_assert(elemSize > 0 && ((index % elemSize) == 0));
*pInxVN = vnStore->VNForPtrSizeIntCon((index / elemSize) + constInd);
}
else
{
bool canFoldDiv = false;
// If the index VN is a MUL by elemSize, see if we can eliminate it instead of adding
// the division by elemSize.
VNFuncApp funcApp;
if (vnStore->GetVNFunc(inxVN, &funcApp) && funcApp.m_func == (VNFunc)GT_MUL)
{
ValueNum vnForElemSize = vnStore->VNForLongCon(elemSize);
// One of the multiply operands is elemSize, so the resulting
// index VN should simply be the other operand.
if (funcApp.m_args[1] == vnForElemSize)
{
*pInxVN = funcApp.m_args[0];
canFoldDiv = true;
}
else if (funcApp.m_args[0] == vnForElemSize)
{
*pInxVN = funcApp.m_args[1];
canFoldDiv = true;
}
}
// Perform ((inxVN / elemSizeVN) + vnForConstInd)
if (!canFoldDiv)
{
ValueNum vnForElemSize = vnStore->VNForPtrSizeIntCon(elemSize);
ValueNum vnForScaledInx = vnStore->VNForFunc(TYP_I_IMPL, VNFunc(GT_DIV), inxVN, vnForElemSize);
*pInxVN = vnForScaledInx;
}
if (constInd != 0)
{
ValueNum vnForConstInd = comp->GetValueNumStore()->VNForPtrSizeIntCon(constInd);
VNFunc vnFunc = VNFunc(GT_ADD);
*pInxVN = comp->GetValueNumStore()->VNForFunc(TYP_I_IMPL, vnFunc, *pInxVN, vnForConstInd);
}
}
}
}
void GenTree::ParseArrayAddressWork(Compiler* comp,
target_ssize_t inputMul,
GenTree** pArr,
ValueNum* pInxVN,
target_ssize_t* pOffset,
FieldSeqNode** pFldSeq)
{
if (TypeGet() == TYP_REF)
{
// This must be the array pointer.
*pArr = this;
assert(inputMul == 1); // Can't multiply the array pointer by anything.
}
else
{
switch (OperGet())
{
case GT_CNS_INT:
*pFldSeq = comp->GetFieldSeqStore()->Append(*pFldSeq, AsIntCon()->gtFieldSeq);
assert(!AsIntCon()->ImmedValNeedsReloc(comp));
// TODO-CrossBitness: we wouldn't need the cast below if GenTreeIntCon::gtIconVal had target_ssize_t
// type.
*pOffset += (inputMul * (target_ssize_t)(AsIntCon()->gtIconVal));
return;
case GT_ADD:
case GT_SUB:
AsOp()->gtOp1->ParseArrayAddressWork(comp, inputMul, pArr, pInxVN, pOffset, pFldSeq);
if (OperGet() == GT_SUB)
{
inputMul = -inputMul;
}
AsOp()->gtOp2->ParseArrayAddressWork(comp, inputMul, pArr, pInxVN, pOffset, pFldSeq);
return;
case GT_MUL:
{
// If one op is a constant, continue parsing down.
target_ssize_t subMul = 0;
GenTree* nonConst = nullptr;
if (AsOp()->gtOp1->IsCnsIntOrI())
{
// If the other arg is an int constant, and is a "not-a-field", choose
// that as the multiplier, thus preserving constant index offsets...
if (AsOp()->gtOp2->OperGet() == GT_CNS_INT &&
AsOp()->gtOp2->AsIntCon()->gtFieldSeq == FieldSeqStore::NotAField())
{
assert(!AsOp()->gtOp2->AsIntCon()->ImmedValNeedsReloc(comp));
// TODO-CrossBitness: we wouldn't need the cast below if GenTreeIntConCommon::gtIconVal had
// target_ssize_t type.
subMul = (target_ssize_t)AsOp()->gtOp2->AsIntConCommon()->IconValue();
nonConst = AsOp()->gtOp1;
}
else
{
assert(!AsOp()->gtOp1->AsIntCon()->ImmedValNeedsReloc(comp));
// TODO-CrossBitness: we wouldn't need the cast below if GenTreeIntConCommon::gtIconVal had
// target_ssize_t type.
subMul = (target_ssize_t)AsOp()->gtOp1->AsIntConCommon()->IconValue();
nonConst = AsOp()->gtOp2;
}
}
else if (AsOp()->gtOp2->IsCnsIntOrI())
{
assert(!AsOp()->gtOp2->AsIntCon()->ImmedValNeedsReloc(comp));
// TODO-CrossBitness: we wouldn't need the cast below if GenTreeIntConCommon::gtIconVal had
// target_ssize_t type.
subMul = (target_ssize_t)AsOp()->gtOp2->AsIntConCommon()->IconValue();
nonConst = AsOp()->gtOp1;
}
if (nonConst != nullptr)
{
nonConst->ParseArrayAddressWork(comp, inputMul * subMul, pArr, pInxVN, pOffset, pFldSeq);
return;
}
// Otherwise, exit the switch, treat as a contribution to the index.
}
break;
case GT_LSH:
// If one op is a constant, continue parsing down.
if (AsOp()->gtOp2->IsCnsIntOrI())
{
assert(!AsOp()->gtOp2->AsIntCon()->ImmedValNeedsReloc(comp));
// TODO-CrossBitness: we wouldn't need the cast below if GenTreeIntCon::gtIconVal had target_ssize_t
// type.
target_ssize_t shiftVal = (target_ssize_t)AsOp()->gtOp2->AsIntConCommon()->IconValue();
target_ssize_t subMul = target_ssize_t{1} << shiftVal;
AsOp()->gtOp1->ParseArrayAddressWork(comp, inputMul * subMul, pArr, pInxVN, pOffset, pFldSeq);
return;
}
// Otherwise, exit the switch, treat as a contribution to the index.
break;
case GT_COMMA:
// We don't care about exceptions for this purpose.
if (AsOp()->gtOp1->OperIs(GT_BOUNDS_CHECK) || AsOp()->gtOp1->IsNothingNode())
{
AsOp()->gtOp2->ParseArrayAddressWork(comp, inputMul, pArr, pInxVN, pOffset, pFldSeq);
return;
}
break;
default:
break;
}
// If we didn't return above, must be a contribution to the non-constant part of the index VN.
ValueNum vn = comp->GetValueNumStore()->VNLiberalNormalValue(gtVNPair);
if (inputMul != 1)
{
ValueNum mulVN = comp->GetValueNumStore()->VNForLongCon(inputMul);
vn = comp->GetValueNumStore()->VNForFunc(TypeGet(), VNFunc(GT_MUL), mulVN, vn);
}
if (*pInxVN == ValueNumStore::NoVN)
{
*pInxVN = vn;
}
else
{
*pInxVN = comp->GetValueNumStore()->VNForFunc(TypeGet(), VNFunc(GT_ADD), *pInxVN, vn);
}
}
}
bool GenTree::ParseArrayElemForm(Compiler* comp, ArrayInfo* arrayInfo, FieldSeqNode** pFldSeq)
{
if (OperIsIndir())
{
if (gtFlags & GTF_IND_ARR_INDEX)
{
bool b = comp->GetArrayInfoMap()->Lookup(this, arrayInfo);
assert(b);
return true;
}
// Otherwise...
GenTree* addr = AsIndir()->Addr();
return addr->ParseArrayElemAddrForm(comp, arrayInfo, pFldSeq);
}
else
{
return false;
}
}
bool GenTree::ParseArrayElemAddrForm(Compiler* comp, ArrayInfo* arrayInfo, FieldSeqNode** pFldSeq)
{
switch (OperGet())
{
case GT_ADD:
{
GenTree* arrAddr = nullptr;
GenTree* offset = nullptr;
if (AsOp()->gtOp1->TypeGet() == TYP_BYREF)
{
arrAddr = AsOp()->gtOp1;
offset = AsOp()->gtOp2;
}
else if (AsOp()->gtOp2->TypeGet() == TYP_BYREF)
{
arrAddr = AsOp()->gtOp2;
offset = AsOp()->gtOp1;
}
else
{
return false;
}
if (!offset->ParseOffsetForm(comp, pFldSeq))
{
return false;
}
return arrAddr->ParseArrayElemAddrForm(comp, arrayInfo, pFldSeq);
}
case GT_ADDR:
{
GenTree* addrArg = AsOp()->gtOp1;
if (addrArg->OperGet() != GT_IND)
{
return false;
}
else
{
// The "Addr" node might be annotated with a zero-offset field sequence.
FieldSeqNode* zeroOffsetFldSeq = nullptr;
if (comp->GetZeroOffsetFieldMap()->Lookup(this, &zeroOffsetFldSeq))
{
*pFldSeq = comp->GetFieldSeqStore()->Append(*pFldSeq, zeroOffsetFldSeq);
}
return addrArg->ParseArrayElemForm(comp, arrayInfo, pFldSeq);
}
}
default:
return false;
}
}
bool GenTree::ParseOffsetForm(Compiler* comp, FieldSeqNode** pFldSeq)
{
switch (OperGet())
{
case GT_CNS_INT:
{
GenTreeIntCon* icon = AsIntCon();
*pFldSeq = comp->GetFieldSeqStore()->Append(*pFldSeq, icon->gtFieldSeq);
return true;
}
case GT_ADD:
if (!AsOp()->gtOp1->ParseOffsetForm(comp, pFldSeq))
{
return false;
}
return AsOp()->gtOp2->ParseOffsetForm(comp, pFldSeq);
default:
return false;
}
}
void GenTree::LabelIndex(Compiler* comp, bool isConst)
{
switch (OperGet())
{
case GT_CNS_INT:
// If we got here, this is a contribution to the constant part of the index.
if (isConst)
{
AsIntCon()->gtFieldSeq =
comp->GetFieldSeqStore()->CreateSingleton(FieldSeqStore::ConstantIndexPseudoField);
}
return;
case GT_LCL_VAR:
gtFlags |= GTF_VAR_ARR_INDEX;
return;
case GT_ADD:
case GT_SUB:
AsOp()->gtOp1->LabelIndex(comp, isConst);
AsOp()->gtOp2->LabelIndex(comp, isConst);
break;
case GT_CAST:
AsOp()->gtOp1->LabelIndex(comp, isConst);
break;
case GT_ARR_LENGTH:
gtFlags |= GTF_ARRLEN_ARR_IDX;
return;
default:
// For all other operators, peel off one constant; and then label the other if it's also a constant.
if (OperIsArithmetic() || OperIsCompare())
{
if (AsOp()->gtOp2->OperGet() == GT_CNS_INT)
{
AsOp()->gtOp1->LabelIndex(comp, isConst);
break;
}
else if (AsOp()->gtOp1->OperGet() == GT_CNS_INT)
{
AsOp()->gtOp2->LabelIndex(comp, isConst);
break;
}
// Otherwise continue downward on both, labeling vars.
AsOp()->gtOp1->LabelIndex(comp, false);
AsOp()->gtOp2->LabelIndex(comp, false);
}
break;
}
}
// Note that the value of the below field doesn't matter; it exists only to provide a distinguished address.
//
// static
FieldSeqNode FieldSeqStore::s_notAField(nullptr, nullptr);
// FieldSeqStore methods.
FieldSeqStore::FieldSeqStore(CompAllocator alloc) : m_alloc(alloc), m_canonMap(new (alloc) FieldSeqNodeCanonMap(alloc))
{
}
FieldSeqNode* FieldSeqStore::CreateSingleton(CORINFO_FIELD_HANDLE fieldHnd)
{
FieldSeqNode fsn(fieldHnd, nullptr);
FieldSeqNode* res = nullptr;
if (m_canonMap->Lookup(fsn, &res))
{
return res;
}
else
{
res = m_alloc.allocate<FieldSeqNode>(1);
*res = fsn;
m_canonMap->Set(fsn, res);
return res;
}
}
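//------------------------------------------------------------------------
// Append: return the canonical concatenation of two field sequences.
//
// Example (illustrative sketch; "a.b" and "c" are hypothetical fields):
//    Append([a.b], [c]) yields the canonical sequence [a.b.c]; if either argument is
//    NotAField(), the result is NotAField(); a null argument acts as the identity.
//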
FieldSeqNode* FieldSeqStore::Append(FieldSeqNode* a, FieldSeqNode* b)
{
if (a == nullptr)
{
return b;
}
else if (a == NotAField())
{
return NotAField();
}
else if (b == nullptr)
{
return a;
}
else if (b == NotAField())
{
return NotAField();
// Extremely special case for ConstantIndex pseudo-fields -- appending two consecutive such
// fields collapses them to one.
}
else if (a->m_next == nullptr && a->m_fieldHnd == ConstantIndexPseudoField &&
b->m_fieldHnd == ConstantIndexPseudoField)
{
return b;
}
else
{
// We should never add a duplicate FieldSeqNode
assert(a != b);
FieldSeqNode* tmp = Append(a->m_next, b);
FieldSeqNode fsn(a->m_fieldHnd, tmp);
FieldSeqNode* res = nullptr;
if (m_canonMap->Lookup(fsn, &res))
{
return res;
}
else
{
res = m_alloc.allocate<FieldSeqNode>(1);
*res = fsn;
m_canonMap->Set(fsn, res);
return res;
}
}
}
// Static vars.
int FieldSeqStore::FirstElemPseudoFieldStruct;
int FieldSeqStore::ConstantIndexPseudoFieldStruct;
CORINFO_FIELD_HANDLE FieldSeqStore::FirstElemPseudoField =
(CORINFO_FIELD_HANDLE)&FieldSeqStore::FirstElemPseudoFieldStruct;
CORINFO_FIELD_HANDLE FieldSeqStore::ConstantIndexPseudoField =
(CORINFO_FIELD_HANDLE)&FieldSeqStore::ConstantIndexPseudoFieldStruct;
bool FieldSeqNode::IsFirstElemFieldSeq()
{
return m_fieldHnd == FieldSeqStore::FirstElemPseudoField;
}
bool FieldSeqNode::IsConstantIndexFieldSeq()
{
return m_fieldHnd == FieldSeqStore::ConstantIndexPseudoField;
}
bool FieldSeqNode::IsPseudoField() const
{
return m_fieldHnd == FieldSeqStore::FirstElemPseudoField || m_fieldHnd == FieldSeqStore::ConstantIndexPseudoField;
}
#ifdef FEATURE_SIMD
GenTreeSIMD* Compiler::gtNewSIMDNode(
var_types type, GenTree* op1, SIMDIntrinsicID simdIntrinsicID, CorInfoType simdBaseJitType, unsigned simdSize)
{
assert(op1 != nullptr);
SetOpLclRelatedToSIMDIntrinsic(op1);
GenTreeSIMD* simdNode = new (this, GT_SIMD)
GenTreeSIMD(type, getAllocator(CMK_ASTNode), op1, simdIntrinsicID, simdBaseJitType, simdSize);
return simdNode;
}
GenTreeSIMD* Compiler::gtNewSIMDNode(var_types type,
GenTree* op1,
GenTree* op2,
SIMDIntrinsicID simdIntrinsicID,
CorInfoType simdBaseJitType,
unsigned simdSize)
{
assert(op1 != nullptr);
SetOpLclRelatedToSIMDIntrinsic(op1);
SetOpLclRelatedToSIMDIntrinsic(op2);
GenTreeSIMD* simdNode = new (this, GT_SIMD)
GenTreeSIMD(type, getAllocator(CMK_ASTNode), op1, op2, simdIntrinsicID, simdBaseJitType, simdSize);
return simdNode;
}
//-------------------------------------------------------------------
// SetOpLclRelatedToSIMDIntrinsic: Determine if the tree has a local var that needs to be set
// as used by a SIMD intrinsic, and if so, set that local var appropriately.
//
// Arguments:
// op - The tree, to be an operand of a new GT_SIMD node, to check.
//
void Compiler::SetOpLclRelatedToSIMDIntrinsic(GenTree* op)
{
if (op == nullptr)
{
return;
}
if (op->OperIsLocal())
{
setLclRelatedToSIMDIntrinsic(op);
}
else if (op->OperIs(GT_OBJ))
{
GenTree* addr = op->AsIndir()->Addr();
if (addr->OperIs(GT_ADDR))
{
GenTree* addrOp1 = addr->AsOp()->gtGetOp1();
if (addrOp1->OperIsLocal())
{
setLclRelatedToSIMDIntrinsic(addrOp1);
}
}
}
}
bool GenTree::isCommutativeSIMDIntrinsic()
{
assert(gtOper == GT_SIMD);
switch (AsSIMD()->GetSIMDIntrinsicId())
{
case SIMDIntrinsicBitwiseAnd:
case SIMDIntrinsicBitwiseOr:
case SIMDIntrinsicEqual:
return true;
default:
return false;
}
}
void GenTreeMultiOp::ResetOperandArray(size_t newOperandCount,
Compiler* compiler,
GenTree** inlineOperands,
size_t inlineOperandCount)
{
size_t oldOperandCount = GetOperandCount();
GenTree** oldOperands = GetOperandArray();
if (newOperandCount > oldOperandCount)
{
if (newOperandCount <= inlineOperandCount)
{
assert(oldOperandCount <= inlineOperandCount);
assert(oldOperands == inlineOperands);
}
else
{
// The most difficult case: we need to recreate the dynamic array.
assert(compiler != nullptr);
m_operands = compiler->getAllocator(CMK_ASTNode).allocate<GenTree*>(newOperandCount);
}
}
else
{
// We are shrinking the array and may, in the process, switch to an inline representation.
// We choose to do so for simplicity ("if a node has <= InlineOperandCount operands,
// then it stores them inline"), but actually it may be more profitable to not do that,
// it will save us a copy and a potential cache miss (though the latter seems unlikely).
if ((newOperandCount <= inlineOperandCount) && (oldOperands != inlineOperands))
{
m_operands = inlineOperands;
}
}
#ifdef DEBUG
for (size_t i = 0; i < newOperandCount; i++)
{
m_operands[i] = nullptr;
}
#endif // DEBUG
SetOperandCount(newOperandCount);
}
/* static */ bool GenTreeMultiOp::OperandsAreEqual(GenTreeMultiOp* op1, GenTreeMultiOp* op2)
{
if (op1->GetOperandCount() != op2->GetOperandCount())
{
return false;
}
for (size_t i = 1; i <= op1->GetOperandCount(); i++)
{
if (!Compare(op1->Op(i), op2->Op(i)))
{
return false;
}
}
return true;
}
void GenTreeMultiOp::InitializeOperands(GenTree** operands, size_t operandCount)
{
for (size_t i = 0; i < operandCount; i++)
{
m_operands[i] = operands[i];
gtFlags |= (operands[i]->gtFlags & GTF_ALL_EFFECT);
}
SetOperandCount(operandCount);
}
var_types GenTreeJitIntrinsic::GetAuxiliaryType() const
{
CorInfoType auxiliaryJitType = GetAuxiliaryJitType();
if (auxiliaryJitType == CORINFO_TYPE_UNDEF)
{
return TYP_UNKNOWN;
}
return JitType2PreciseVarType(auxiliaryJitType);
}
var_types GenTreeJitIntrinsic::GetSimdBaseType() const
{
CorInfoType simdBaseJitType = GetSimdBaseJitType();
if (simdBaseJitType == CORINFO_TYPE_UNDEF)
{
return TYP_UNKNOWN;
}
return JitType2PreciseVarType(simdBaseJitType);
}
// Returns true for the SIMD Intrinsic instructions that have MemoryLoad semantics, false otherwise
bool GenTreeSIMD::OperIsMemoryLoad() const
{
if (GetSIMDIntrinsicId() == SIMDIntrinsicInitArray)
{
return true;
}
return false;
}
// TODO-Review: why are layouts not compared here?
/* static */ bool GenTreeSIMD::Equals(GenTreeSIMD* op1, GenTreeSIMD* op2)
{
return (op1->TypeGet() == op2->TypeGet()) && (op1->GetSIMDIntrinsicId() == op2->GetSIMDIntrinsicId()) &&
(op1->GetSimdBaseType() == op2->GetSimdBaseType()) && (op1->GetSimdSize() == op2->GetSimdSize()) &&
OperandsAreEqual(op1, op2);
}
#endif // FEATURE_SIMD
#ifdef FEATURE_HW_INTRINSICS
bool GenTree::isCommutativeHWIntrinsic() const
{
assert(gtOper == GT_HWINTRINSIC);
#ifdef TARGET_XARCH
return HWIntrinsicInfo::IsCommutative(AsHWIntrinsic()->GetHWIntrinsicId());
#else
return false;
#endif // TARGET_XARCH
}
bool GenTree::isContainableHWIntrinsic() const
{
assert(gtOper == GT_HWINTRINSIC);
#ifdef TARGET_XARCH
switch (AsHWIntrinsic()->GetHWIntrinsicId())
{
case NI_SSE_LoadAlignedVector128:
case NI_SSE_LoadScalarVector128:
case NI_SSE_LoadVector128:
case NI_SSE2_LoadAlignedVector128:
case NI_SSE2_LoadScalarVector128:
case NI_SSE2_LoadVector128:
case NI_AVX_LoadAlignedVector256:
case NI_AVX_LoadVector256:
case NI_AVX_ExtractVector128:
case NI_AVX2_ExtractVector128:
{
return true;
}
default:
{
return false;
}
}
#elif TARGET_ARM64
switch (AsHWIntrinsic()->GetHWIntrinsicId())
{
case NI_Vector64_get_Zero:
case NI_Vector128_get_Zero:
{
return true;
}
default:
{
return false;
}
}
#else
return false;
#endif // TARGET_XARCH
}
bool GenTree::isRMWHWIntrinsic(Compiler* comp)
{
assert(gtOper == GT_HWINTRINSIC);
assert(comp != nullptr);
#if defined(TARGET_XARCH)
if (!comp->canUseVexEncoding())
{
return HWIntrinsicInfo::HasRMWSemantics(AsHWIntrinsic()->GetHWIntrinsicId());
}
switch (AsHWIntrinsic()->GetHWIntrinsicId())
{
// TODO-XArch-Cleanup: Move this switch block to be table driven.
case NI_SSE42_Crc32:
case NI_SSE42_X64_Crc32:
case NI_FMA_MultiplyAdd:
case NI_FMA_MultiplyAddNegated:
case NI_FMA_MultiplyAddNegatedScalar:
case NI_FMA_MultiplyAddScalar:
case NI_FMA_MultiplyAddSubtract:
case NI_FMA_MultiplySubtract:
case NI_FMA_MultiplySubtractAdd:
case NI_FMA_MultiplySubtractNegated:
case NI_FMA_MultiplySubtractNegatedScalar:
case NI_FMA_MultiplySubtractScalar:
{
return true;
}
default:
{
return false;
}
}
#elif defined(TARGET_ARM64)
return HWIntrinsicInfo::HasRMWSemantics(AsHWIntrinsic()->GetHWIntrinsicId());
#else
return false;
#endif
}
GenTreeHWIntrinsic* Compiler::gtNewSimdHWIntrinsicNode(var_types type,
NamedIntrinsic hwIntrinsicID,
CorInfoType simdBaseJitType,
unsigned simdSize,
bool isSimdAsHWIntrinsic)
{
return new (this, GT_HWINTRINSIC) GenTreeHWIntrinsic(type, getAllocator(CMK_ASTNode), hwIntrinsicID,
simdBaseJitType, simdSize, isSimdAsHWIntrinsic);
}
GenTreeHWIntrinsic* Compiler::gtNewSimdHWIntrinsicNode(var_types type,
GenTree* op1,
NamedIntrinsic hwIntrinsicID,
CorInfoType simdBaseJitType,
unsigned simdSize,
bool isSimdAsHWIntrinsic)
{
SetOpLclRelatedToSIMDIntrinsic(op1);
return new (this, GT_HWINTRINSIC) GenTreeHWIntrinsic(type, getAllocator(CMK_ASTNode), hwIntrinsicID,
simdBaseJitType, simdSize, isSimdAsHWIntrinsic, op1);
}
GenTreeHWIntrinsic* Compiler::gtNewSimdHWIntrinsicNode(var_types type,
GenTree* op1,
GenTree* op2,
NamedIntrinsic hwIntrinsicID,
CorInfoType simdBaseJitType,
unsigned simdSize,
bool isSimdAsHWIntrinsic)
{
SetOpLclRelatedToSIMDIntrinsic(op1);
SetOpLclRelatedToSIMDIntrinsic(op2);
return new (this, GT_HWINTRINSIC) GenTreeHWIntrinsic(type, getAllocator(CMK_ASTNode), hwIntrinsicID,
simdBaseJitType, simdSize, isSimdAsHWIntrinsic, op1, op2);
}
GenTreeHWIntrinsic* Compiler::gtNewSimdHWIntrinsicNode(var_types type,
GenTree* op1,
GenTree* op2,
GenTree* op3,
NamedIntrinsic hwIntrinsicID,
CorInfoType simdBaseJitType,
unsigned simdSize,
bool isSimdAsHWIntrinsic)
{
SetOpLclRelatedToSIMDIntrinsic(op1);
SetOpLclRelatedToSIMDIntrinsic(op2);
SetOpLclRelatedToSIMDIntrinsic(op3);
return new (this, GT_HWINTRINSIC) GenTreeHWIntrinsic(type, getAllocator(CMK_ASTNode), hwIntrinsicID,
simdBaseJitType, simdSize, isSimdAsHWIntrinsic, op1, op2, op3);
}
GenTreeHWIntrinsic* Compiler::gtNewSimdHWIntrinsicNode(var_types type,
GenTree* op1,
GenTree* op2,
GenTree* op3,
GenTree* op4,
NamedIntrinsic hwIntrinsicID,
CorInfoType simdBaseJitType,
unsigned simdSize,
bool isSimdAsHWIntrinsic)
{
SetOpLclRelatedToSIMDIntrinsic(op1);
SetOpLclRelatedToSIMDIntrinsic(op2);
SetOpLclRelatedToSIMDIntrinsic(op3);
SetOpLclRelatedToSIMDIntrinsic(op4);
return new (this, GT_HWINTRINSIC)
GenTreeHWIntrinsic(type, getAllocator(CMK_ASTNode), hwIntrinsicID, simdBaseJitType, simdSize,
isSimdAsHWIntrinsic, op1, op2, op3, op4);
}
GenTreeHWIntrinsic* Compiler::gtNewSimdHWIntrinsicNode(var_types type,
GenTree** operands,
size_t operandCount,
NamedIntrinsic hwIntrinsicID,
CorInfoType simdBaseJitType,
unsigned simdSize,
bool isSimdAsHWIntrinsic)
{
IntrinsicNodeBuilder nodeBuilder(getAllocator(CMK_ASTNode), operandCount);
for (size_t i = 0; i < operandCount; i++)
{
nodeBuilder.AddOperand(i, operands[i]);
SetOpLclRelatedToSIMDIntrinsic(operands[i]);
}
return new (this, GT_HWINTRINSIC)
GenTreeHWIntrinsic(type, std::move(nodeBuilder), hwIntrinsicID, simdBaseJitType, simdSize, isSimdAsHWIntrinsic);
}
GenTreeHWIntrinsic* Compiler::gtNewSimdHWIntrinsicNode(var_types type,
IntrinsicNodeBuilder&& nodeBuilder,
NamedIntrinsic hwIntrinsicID,
CorInfoType simdBaseJitType,
unsigned simdSize,
bool isSimdAsHWIntrinsic)
{
for (size_t i = 0; i < nodeBuilder.GetOperandCount(); i++)
{
SetOpLclRelatedToSIMDIntrinsic(nodeBuilder.GetOperand(i));
}
return new (this, GT_HWINTRINSIC)
GenTreeHWIntrinsic(type, std::move(nodeBuilder), hwIntrinsicID, simdBaseJitType, simdSize, isSimdAsHWIntrinsic);
}
GenTree* Compiler::gtNewSimdAbsNode(
var_types type, GenTree* op1, CorInfoType simdBaseJitType, unsigned simdSize, bool isSimdAsHWIntrinsic)
{
assert(IsBaselineSimdIsaSupportedDebugOnly());
assert(varTypeIsSIMD(type));
assert(getSIMDTypeForSize(simdSize) == type);
assert(op1 != nullptr);
assert(op1->TypeGet() == type);
var_types simdBaseType = JitType2PreciseVarType(simdBaseJitType);
assert(varTypeIsArithmetic(simdBaseType));
if (varTypeIsUnsigned(simdBaseType))
{
return op1;
}
#if defined(TARGET_XARCH)
if (varTypeIsFloating(simdBaseType))
{
// Abs(v) = v & ~new vector<T>(-0.0);
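// The bit pattern of -0.0 has only the sign bit set, so "v & ~(-0.0)" simply clears the
// sign bit of every element, which is Abs for IEEE floating-point values.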
assert((simdSize != 32) || compIsaSupportedDebugOnly(InstructionSet_AVX));
GenTree* bitMask = gtNewDconNode(-0.0, simdBaseType);
bitMask = gtNewSimdCreateBroadcastNode(type, bitMask, simdBaseJitType, simdSize, isSimdAsHWIntrinsic);
return gtNewSimdBinOpNode(GT_AND_NOT, type, op1, bitMask, simdBaseJitType, simdSize, isSimdAsHWIntrinsic);
}
assert((simdSize != 32) || compIsaSupportedDebugOnly(InstructionSet_AVX2));
if ((simdBaseType != TYP_LONG) && ((simdSize == 32) || compOpportunisticallyDependsOn(InstructionSet_SSSE3)))
{
NamedIntrinsic intrinsic = (simdSize == 32) ? NI_AVX2_Abs : NI_SSSE3_Abs;
return gtNewSimdHWIntrinsicNode(type, op1, intrinsic, simdBaseJitType, simdSize, isSimdAsHWIntrinsic);
}
else
{
GenTree* tmp;
CORINFO_CLASS_HANDLE clsHnd = gtGetStructHandleForSIMD(type, simdBaseJitType);
GenTree* op1Dup1;
op1 = impCloneExpr(op1, &op1Dup1, clsHnd, (unsigned)CHECK_SPILL_ALL,
nullptr DEBUGARG("Clone op1 for vector abs"));
GenTree* op1Dup2;
op1Dup1 = impCloneExpr(op1Dup1, &op1Dup2, clsHnd, (unsigned)CHECK_SPILL_ALL,
nullptr DEBUGARG("Clone op1 for vector abs"));
// op1 = op1 < Zero
tmp = gtNewSimdZeroNode(type, simdBaseJitType, simdSize, isSimdAsHWIntrinsic);
op1 = gtNewSimdCmpOpNode(GT_LT, type, op1, tmp, simdBaseJitType, simdSize, isSimdAsHWIntrinsic);
// tmp = Zero - op1Dup1
tmp = gtNewSimdZeroNode(type, simdBaseJitType, simdSize, isSimdAsHWIntrinsic);
tmp = gtNewSimdBinOpNode(GT_SUB, type, tmp, op1Dup1, simdBaseJitType, simdSize, isSimdAsHWIntrinsic);
// result = ConditionalSelect(op1, tmp, op1Dup2)
return gtNewSimdCndSelNode(type, op1, tmp, op1Dup2, simdBaseJitType, simdSize, isSimdAsHWIntrinsic);
}
#elif defined(TARGET_ARM64)
NamedIntrinsic intrinsic = NI_AdvSimd_Abs;
if (simdBaseType == TYP_DOUBLE)
{
intrinsic = (simdSize == 8) ? NI_AdvSimd_AbsScalar : NI_AdvSimd_Arm64_Abs;
}
else if (varTypeIsLong(simdBaseType))
{
intrinsic = (simdSize == 8) ? NI_AdvSimd_Arm64_AbsScalar : NI_AdvSimd_Arm64_Abs;
}
assert(intrinsic != NI_Illegal);
return gtNewSimdHWIntrinsicNode(type, op1, intrinsic, simdBaseJitType, simdSize, isSimdAsHWIntrinsic);
#else
#error Unsupported platform
#endif
}
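//-------------------------------------------------------------------
// gtNewSimdBinOpNode: Creates a SIMD node for the given binary operation, mapping it onto
// the appropriate hardware intrinsic or, where no single instruction exists, onto an
// equivalent sequence of nodes.
//
// Arguments:
// op - The binary operation (e.g. GT_ADD, GT_AND, GT_LSH)
// type - The SIMD type of the result
// op1 - The first operand
// op2 - The second operand (for shifts, the shift count)
// simdBaseJitType - The base JIT type of the SIMD elements
// simdSize - The size of the SIMD type, in bytes
// isSimdAsHWIntrinsic - true if the node implements a SimdAsHWIntrinsic (System.Numerics) method
//
// Return Value:
// The created node.
//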
GenTree* Compiler::gtNewSimdBinOpNode(genTreeOps op,
var_types type,
GenTree* op1,
GenTree* op2,
CorInfoType simdBaseJitType,
unsigned simdSize,
bool isSimdAsHWIntrinsic)
{
assert(IsBaselineSimdIsaSupportedDebugOnly());
assert(varTypeIsSIMD(type));
assert(getSIMDTypeForSize(simdSize) == type);
var_types simdBaseType = JitType2PreciseVarType(simdBaseJitType);
assert(varTypeIsArithmetic(simdBaseType));
assert(op1 != nullptr);
assert(op1->TypeIs(type, simdBaseType, genActualType(simdBaseType)));
assert(op2 != nullptr);
if ((op == GT_LSH) || (op == GT_RSH) || (op == GT_RSZ))
{
assert(op2->TypeIs(TYP_INT));
}
else
{
assert(op2->TypeIs(type, simdBaseType, genActualType(simdBaseType)));
}
NamedIntrinsic intrinsic = NI_Illegal;
CORINFO_CLASS_HANDLE clsHnd = gtGetStructHandleForSIMD(type, simdBaseJitType);
switch (op)
{
#if defined(TARGET_XARCH)
case GT_ADD:
{
if (simdSize == 32)
{
assert(compIsaSupportedDebugOnly(InstructionSet_AVX));
if (varTypeIsFloating(simdBaseType))
{
intrinsic = NI_AVX_Add;
}
else
{
assert(compIsaSupportedDebugOnly(InstructionSet_AVX2));
intrinsic = NI_AVX2_Add;
}
}
else if (simdBaseType == TYP_FLOAT)
{
intrinsic = NI_SSE_Add;
}
else
{
intrinsic = NI_SSE2_Add;
}
break;
}
case GT_AND:
{
if (simdSize == 32)
{
assert(compIsaSupportedDebugOnly(InstructionSet_AVX));
if (varTypeIsFloating(simdBaseType))
{
intrinsic = NI_AVX_And;
}
else if (compOpportunisticallyDependsOn(InstructionSet_AVX2))
{
intrinsic = NI_AVX2_And;
}
else
{
// Since this is a bitwise operation, we can still support it by lying
// about the type and doing the operation using a supported instruction
intrinsic = NI_AVX_And;
simdBaseJitType = CORINFO_TYPE_FLOAT;
}
}
else if (simdBaseType == TYP_FLOAT)
{
intrinsic = NI_SSE_And;
}
else
{
intrinsic = NI_SSE2_And;
}
break;
}
case GT_AND_NOT:
{
if (simdSize == 32)
{
assert(compIsaSupportedDebugOnly(InstructionSet_AVX));
if (varTypeIsFloating(simdBaseType))
{
intrinsic = NI_AVX_AndNot;
}
else if (compOpportunisticallyDependsOn(InstructionSet_AVX2))
{
intrinsic = NI_AVX2_AndNot;
}
else
{
// Since this is a bitwise operation, we can still support it by lying
// about the type and doing the operation using a supported instruction
intrinsic = NI_AVX_AndNot;
simdBaseJitType = CORINFO_TYPE_FLOAT;
}
}
else if (simdBaseType == TYP_FLOAT)
{
intrinsic = NI_SSE_AndNot;
}
else
{
intrinsic = NI_SSE2_AndNot;
}
// GT_AND_NOT expects `op1 & ~op2`, but xarch does `~op1 & op2`
std::swap(op1, op2);
break;
}
case GT_DIV:
{
// TODO-XARCH-CQ: We could support division by constant for integral types
assert(varTypeIsFloating(simdBaseType));
if (simdSize == 32)
{
assert(compIsaSupportedDebugOnly(InstructionSet_AVX));
intrinsic = NI_AVX_Divide;
}
else if (simdBaseType == TYP_FLOAT)
{
intrinsic = NI_SSE_Divide;
}
else
{
intrinsic = NI_SSE2_Divide;
}
break;
}
case GT_LSH:
case GT_RSH:
case GT_RSZ:
{
assert(!varTypeIsByte(simdBaseType));
assert(!varTypeIsFloating(simdBaseType));
assert((op != GT_RSH) || !varTypeIsUnsigned(simdBaseType));
// "over shifting" is platform specific behavior. We will match the C# behavior
// this requires we mask with (sizeof(T) * 8) - 1 which ensures the shift cannot
// exceed the number of bits available in `T`. This is roughly equivalent to
// x % (sizeof(T) * 8), but that is "more expensive" and only the same for unsigned
// inputs, where-as we have a signed-input and so negative values would differ.
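// For example, for TYP_INT the mask is 31, so a shift count of 33 is treated as a shift
// by 1, matching the C# behavior of "x << 33" on an int.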
unsigned shiftCountMask = (genTypeSize(simdBaseType) * 8) - 1;
if (op2->IsCnsIntOrI())
{
op2->AsIntCon()->gtIconVal &= shiftCountMask;
}
else
{
op2 = gtNewOperNode(GT_AND, TYP_INT, op2, gtNewIconNode(shiftCountMask));
op2 = gtNewSimdHWIntrinsicNode(TYP_SIMD16, op2, NI_SSE2_ConvertScalarToVector128Int32, CORINFO_TYPE_INT,
16, isSimdAsHWIntrinsic);
}
if (simdSize == 32)
{
assert(compIsaSupportedDebugOnly(InstructionSet_AVX2));
if (op == GT_LSH)
{
intrinsic = NI_AVX2_ShiftLeftLogical;
}
else if (op == GT_RSH)
{
intrinsic = NI_AVX2_ShiftRightArithmetic;
}
else
{
assert(op == GT_RSZ);
intrinsic = NI_AVX2_ShiftRightLogical;
}
}
else if (op == GT_LSH)
{
intrinsic = NI_SSE2_ShiftLeftLogical;
}
else if (op == GT_RSH)
{
intrinsic = NI_SSE2_ShiftRightArithmetic;
}
else
{
assert(op == GT_RSZ);
intrinsic = NI_SSE2_ShiftRightLogical;
}
break;
}
case GT_MUL:
{
GenTree** broadcastOp = nullptr;
if (varTypeIsArithmetic(op1))
{
broadcastOp = &op1;
}
else if (varTypeIsArithmetic(op2))
{
broadcastOp = &op2;
}
if (broadcastOp != nullptr)
{
*broadcastOp =
gtNewSimdCreateBroadcastNode(type, *broadcastOp, simdBaseJitType, simdSize, isSimdAsHWIntrinsic);
}
switch (simdBaseType)
{
case TYP_SHORT:
case TYP_USHORT:
{
if (simdSize == 32)
{
assert(compIsaSupportedDebugOnly(InstructionSet_AVX2));
intrinsic = NI_AVX2_MultiplyLow;
}
else
{
intrinsic = NI_SSE2_MultiplyLow;
}
break;
}
case TYP_INT:
case TYP_UINT:
{
if (simdSize == 32)
{
assert(compIsaSupportedDebugOnly(InstructionSet_AVX2));
intrinsic = NI_AVX2_MultiplyLow;
}
else if (compOpportunisticallyDependsOn(InstructionSet_SSE41))
{
intrinsic = NI_SSE41_MultiplyLow;
}
else
{
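// SSE4.1's 32-bit element-wise multiply is not available here and SSE2 has none, so
// synthesize it: Sse2.Multiply (pmuludq) produces 64-bit products of the even 32-bit
// lanes, so multiply the operands and their copies shifted right by 4 bytes, then pack
// the low 32 bits of each product back together via Shuffle and UnpackLow.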
// op1Dup = op1
GenTree* op1Dup;
op1 = impCloneExpr(op1, &op1Dup, clsHnd, (unsigned)CHECK_SPILL_ALL,
nullptr DEBUGARG("Clone op1 for vector multiply"));
// op2Dup = op2
GenTree* op2Dup;
op2 = impCloneExpr(op2, &op2Dup, clsHnd, (unsigned)CHECK_SPILL_ALL,
nullptr DEBUGARG("Clone op2 for vector multiply"));
// op1 = Sse2.ShiftRightLogical128BitLane(op1, 4)
op1 = gtNewSimdHWIntrinsicNode(type, op1, gtNewIconNode(4, TYP_INT),
NI_SSE2_ShiftRightLogical128BitLane, simdBaseJitType, simdSize,
isSimdAsHWIntrinsic);
// op2 = Sse2.ShiftRightLogical128BitLane(op2, 4)
op2 = gtNewSimdHWIntrinsicNode(type, op2, gtNewIconNode(4, TYP_INT),
NI_SSE2_ShiftRightLogical128BitLane, simdBaseJitType, simdSize,
isSimdAsHWIntrinsic);
// op2 = Sse2.Multiply(op2.AsUInt32(), op1.AsUInt32()).AsInt32()
op2 = gtNewSimdHWIntrinsicNode(type, op2, op1, NI_SSE2_Multiply, CORINFO_TYPE_ULONG, simdSize,
isSimdAsHWIntrinsic);
// op2 = Sse2.Shuffle(op2, (0, 0, 2, 0))
op2 = gtNewSimdHWIntrinsicNode(type, op2, gtNewIconNode(SHUFFLE_XXZX, TYP_INT), NI_SSE2_Shuffle,
simdBaseJitType, simdSize, isSimdAsHWIntrinsic);
// op1 = Sse2.Multiply(op1Dup.AsUInt32(), op2Dup.AsUInt32()).AsInt32()
op1 = gtNewSimdHWIntrinsicNode(type, op1Dup, op2Dup, NI_SSE2_Multiply, CORINFO_TYPE_ULONG,
simdSize, isSimdAsHWIntrinsic);
// op1 = Sse2.Shuffle(op1, (0, 0, 2, 0))
op1 = gtNewSimdHWIntrinsicNode(type, op1, gtNewIconNode(SHUFFLE_XXZX, TYP_INT), NI_SSE2_Shuffle,
simdBaseJitType, simdSize, isSimdAsHWIntrinsic);
// result = Sse2.UnpackLow(op1, op2)
intrinsic = NI_SSE2_UnpackLow;
}
break;
}
case TYP_FLOAT:
{
if (simdSize == 32)
{
assert(compIsaSupportedDebugOnly(InstructionSet_AVX));
intrinsic = NI_AVX_Multiply;
}
else
{
intrinsic = NI_SSE_Multiply;
}
break;
}
case TYP_DOUBLE:
{
if (simdSize == 32)
{
assert(compIsaSupportedDebugOnly(InstructionSet_AVX));
intrinsic = NI_AVX_Multiply;
}
else
{
intrinsic = NI_SSE2_Multiply;
}
break;
}
default:
{
unreached();
}
}
break;
}
case GT_OR:
{
if (simdSize == 32)
{
assert(compIsaSupportedDebugOnly(InstructionSet_AVX));
if (varTypeIsFloating(simdBaseType))
{
intrinsic = NI_AVX_Or;
}
else if (compOpportunisticallyDependsOn(InstructionSet_AVX2))
{
intrinsic = NI_AVX2_Or;
}
else
{
// Since this is a bitwise operation, we can still support it by lying
// about the type and doing the operation using a supported instruction
intrinsic = NI_AVX_Or;
simdBaseJitType = CORINFO_TYPE_FLOAT;
}
}
else if (simdBaseType == TYP_FLOAT)
{
intrinsic = NI_SSE_Or;
}
else
{
intrinsic = NI_SSE2_Or;
}
break;
}
case GT_SUB:
{
if (simdSize == 32)
{
assert(compIsaSupportedDebugOnly(InstructionSet_AVX));
if (varTypeIsFloating(simdBaseType))
{
intrinsic = NI_AVX_Subtract;
}
else
{
assert(compIsaSupportedDebugOnly(InstructionSet_AVX2));
intrinsic = NI_AVX2_Subtract;
}
}
else if (simdBaseType == TYP_FLOAT)
{
intrinsic = NI_SSE_Subtract;
}
else
{
intrinsic = NI_SSE2_Subtract;
}
break;
}
case GT_XOR:
{
if (simdSize == 32)
{
assert(compIsaSupportedDebugOnly(InstructionSet_AVX));
if (varTypeIsFloating(simdBaseType))
{
intrinsic = NI_AVX_Xor;
}
else if (compOpportunisticallyDependsOn(InstructionSet_AVX2))
{
intrinsic = NI_AVX2_Xor;
}
else
{
// Since this is a bitwise operation, we can still support it by lying
// about the type and doing the operation using a supported instruction
intrinsic = NI_AVX_Xor;
simdBaseJitType = CORINFO_TYPE_FLOAT;
}
}
else if (simdBaseType == TYP_FLOAT)
{
intrinsic = NI_SSE_Xor;
}
else
{
intrinsic = NI_SSE2_Xor;
}
break;
}
#elif defined(TARGET_ARM64)
case GT_ADD:
{
if (simdBaseType == TYP_DOUBLE)
{
intrinsic = (simdSize == 8) ? NI_AdvSimd_AddScalar : NI_AdvSimd_Arm64_Add;
}
else if ((simdSize == 8) && varTypeIsLong(simdBaseType))
{
intrinsic = NI_AdvSimd_AddScalar;
}
else
{
intrinsic = NI_AdvSimd_Add;
}
break;
}
case GT_AND:
{
intrinsic = NI_AdvSimd_And;
break;
}
case GT_AND_NOT:
{
intrinsic = NI_AdvSimd_BitwiseClear;
break;
}
case GT_DIV:
{
// TODO-AARCH-CQ: We could support division by constant for integral types
assert(varTypeIsFloating(simdBaseType));
if ((simdSize == 8) && (simdBaseType == TYP_DOUBLE))
{
intrinsic = NI_AdvSimd_DivideScalar;
}
else
{
intrinsic = NI_AdvSimd_Arm64_Divide;
}
break;
}
case GT_LSH:
case GT_RSH:
case GT_RSZ:
{
assert(!varTypeIsFloating(simdBaseType));
assert((op != GT_RSH) || !varTypeIsUnsigned(simdBaseType));
// "over shifting" is platform specific behavior. We will match the C# behavior
// this requires we mask with (sizeof(T) * 8) - 1 which ensures the shift cannot
// exceed the number of bits available in `T`. This is roughly equivalent to
// x % (sizeof(T) * 8), but that is "more expensive" and only the same for unsigned
// inputs, where-as we have a signed-input and so negative values would differ.
unsigned shiftCountMask = (genTypeSize(simdBaseType) * 8) - 1;
if (op2->IsCnsIntOrI())
{
op2->AsIntCon()->gtIconVal &= shiftCountMask;
if ((simdSize == 8) && varTypeIsLong(simdBaseType))
{
if (op == GT_LSH)
{
intrinsic = NI_AdvSimd_ShiftLeftLogicalScalar;
}
else if (op == GT_RSH)
{
intrinsic = NI_AdvSimd_ShiftRightArithmeticScalar;
}
else
{
assert(op == GT_RSZ);
intrinsic = NI_AdvSimd_ShiftRightLogicalScalar;
}
}
else if (op == GT_LSH)
{
intrinsic = NI_AdvSimd_ShiftLeftLogical;
}
else if (op == GT_RSH)
{
intrinsic = NI_AdvSimd_ShiftRightArithmetic;
}
else
{
assert(op == GT_RSZ);
intrinsic = NI_AdvSimd_ShiftRightLogical;
}
}
else
{
op2 = gtNewOperNode(GT_AND, TYP_INT, op2, gtNewIconNode(shiftCountMask));
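// AdvSimd's variable shift intrinsics (ShiftLogical/ShiftArithmetic) shift left for
// positive counts and right for negative counts, so right shifts are implemented by
// negating the masked shift count before broadcasting it.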
if (op != GT_LSH)
{
op2 = gtNewOperNode(GT_NEG, TYP_INT, op2);
}
op2 = gtNewSimdCreateBroadcastNode(type, op2, simdBaseJitType, simdSize, isSimdAsHWIntrinsic);
if ((simdSize == 8) && varTypeIsLong(simdBaseType))
{
if (op == GT_LSH)
{
intrinsic = NI_AdvSimd_ShiftLogicalScalar;
}
else if (op == GT_RSH)
{
intrinsic = NI_AdvSimd_ShiftArithmeticScalar;
}
else
{
assert(op == GT_RSZ);
intrinsic = NI_AdvSimd_ShiftLogicalScalar;
}
}
else if (op == GT_LSH)
{
intrinsic = NI_AdvSimd_ShiftLogical;
}
else if (op == GT_RSH)
{
intrinsic = NI_AdvSimd_ShiftArithmetic;
}
else
{
assert(op == GT_RSZ);
intrinsic = NI_AdvSimd_ShiftLogical;
}
}
break;
}
case GT_MUL:
{
assert(!varTypeIsLong(simdBaseType));
GenTree** scalarOp = nullptr;
if (varTypeIsArithmetic(op1))
{
// MultiplyByScalar requires the scalar op to be op2
std::swap(op1, op2);
scalarOp = &op2;
}
else if (varTypeIsArithmetic(op2))
{
scalarOp = &op2;
}
switch (JitType2PreciseVarType(simdBaseJitType))
{
case TYP_BYTE:
case TYP_UBYTE:
{
if (scalarOp != nullptr)
{
*scalarOp = gtNewSimdCreateBroadcastNode(type, *scalarOp, simdBaseJitType, simdSize,
isSimdAsHWIntrinsic);
}
intrinsic = NI_AdvSimd_Multiply;
break;
}
case TYP_SHORT:
case TYP_USHORT:
case TYP_INT:
case TYP_UINT:
case TYP_FLOAT:
{
if (scalarOp != nullptr)
{
intrinsic = NI_AdvSimd_MultiplyByScalar;
*scalarOp = gtNewSimdHWIntrinsicNode(TYP_SIMD8, *scalarOp, NI_Vector64_CreateScalarUnsafe,
simdBaseJitType, 8, isSimdAsHWIntrinsic);
}
else
{
intrinsic = NI_AdvSimd_Multiply;
}
break;
}
case TYP_DOUBLE:
{
if (scalarOp != nullptr)
{
intrinsic = NI_AdvSimd_Arm64_MultiplyByScalar;
*scalarOp = gtNewSimdHWIntrinsicNode(TYP_SIMD8, *scalarOp, NI_Vector64_Create, simdBaseJitType,
8, isSimdAsHWIntrinsic);
}
else
{
intrinsic = NI_AdvSimd_Arm64_Multiply;
}
if (simdSize == 8)
{
intrinsic = NI_AdvSimd_MultiplyScalar;
}
break;
}
default:
{
unreached();
}
}
break;
}
case GT_OR:
{
intrinsic = NI_AdvSimd_Or;
break;
}
case GT_SUB:
{
if (simdBaseType == TYP_DOUBLE)
{
intrinsic = (simdSize == 8) ? NI_AdvSimd_SubtractScalar : NI_AdvSimd_Arm64_Subtract;
}
else if ((simdSize == 8) && varTypeIsLong(simdBaseType))
{
intrinsic = NI_AdvSimd_SubtractScalar;
}
else
{
intrinsic = NI_AdvSimd_Subtract;
}
break;
}
case GT_XOR:
{
intrinsic = NI_AdvSimd_Xor;
break;
}
#else
#error Unsupported platform
#endif // !TARGET_XARCH && !TARGET_ARM64
default:
{
unreached();
}
}
assert(intrinsic != NI_Illegal);
return gtNewSimdHWIntrinsicNode(type, op1, op2, intrinsic, simdBaseJitType, simdSize, isSimdAsHWIntrinsic);
}
GenTree* Compiler::gtNewSimdCeilNode(
var_types type, GenTree* op1, CorInfoType simdBaseJitType, unsigned simdSize, bool isSimdAsHWIntrinsic)
{
assert(IsBaselineSimdIsaSupportedDebugOnly());
assert(varTypeIsSIMD(type));
assert(getSIMDTypeForSize(simdSize) == type);
assert(op1 != nullptr);
assert(op1->TypeIs(type));
var_types simdBaseType = JitType2PreciseVarType(simdBaseJitType);
assert(varTypeIsFloating(simdBaseType));
NamedIntrinsic intrinsic = NI_Illegal;
#if defined(TARGET_XARCH)
if (simdSize == 32)
{
assert(compIsaSupportedDebugOnly(InstructionSet_AVX));
intrinsic = NI_AVX_Ceiling;
}
else
{
assert(compIsaSupportedDebugOnly(InstructionSet_SSE41));
intrinsic = NI_SSE41_Ceiling;
}
#elif defined(TARGET_ARM64)
if (simdBaseType == TYP_DOUBLE)
{
intrinsic = (simdSize == 8) ? NI_AdvSimd_CeilingScalar : NI_AdvSimd_Arm64_Ceiling;
}
else
{
intrinsic = NI_AdvSimd_Ceiling;
}
#else
#error Unsupported platform
#endif // !TARGET_XARCH && !TARGET_ARM64
assert(intrinsic != NI_Illegal);
return gtNewSimdHWIntrinsicNode(type, op1, intrinsic, simdBaseJitType, simdSize, isSimdAsHWIntrinsic);
}
GenTree* Compiler::gtNewSimdCmpOpNode(genTreeOps op,
var_types type,
GenTree* op1,
GenTree* op2,
CorInfoType simdBaseJitType,
unsigned simdSize,
bool isSimdAsHWIntrinsic)
{
assert(IsBaselineSimdIsaSupportedDebugOnly());
assert(varTypeIsSIMD(type));
assert(getSIMDTypeForSize(simdSize) == type);
assert(op1 != nullptr);
assert(op1->TypeIs(type));
assert(op2 != nullptr);
assert(op2->TypeIs(type));
var_types simdBaseType = JitType2PreciseVarType(simdBaseJitType);
assert(varTypeIsArithmetic(simdBaseType));
NamedIntrinsic intrinsic = NI_Illegal;
CORINFO_CLASS_HANDLE clsHnd = gtGetStructHandleForSIMD(type, simdBaseJitType);
switch (op)
{
#if defined(TARGET_XARCH)
case GT_EQ:
{
if (simdSize == 32)
{
assert(compIsaSupportedDebugOnly(InstructionSet_AVX));
if (varTypeIsFloating(simdBaseType))
{
intrinsic = NI_AVX_CompareEqual;
}
else
{
assert(compIsaSupportedDebugOnly(InstructionSet_AVX2));
intrinsic = NI_AVX2_CompareEqual;
}
}
else if (simdBaseType == TYP_FLOAT)
{
intrinsic = NI_SSE_CompareEqual;
}
else if (varTypeIsLong(simdBaseType))
{
if (compOpportunisticallyDependsOn(InstructionSet_SSE41))
{
intrinsic = NI_SSE41_CompareEqual;
}
else
{
// There is no direct SSE2 support for comparing TYP_LONG vectors.
// These have to be implemented in terms of TYP_INT vector comparison operations.
//
// tmp = (op1 == op2) i.e. compare for equality as if op1 and op2 are vector of int
// op1 = tmp
// op2 = Shuffle(tmp, (2, 3, 0, 1))
// result = BitwiseAnd(op1, op2)
//
// Shuffle is meant to swap the comparison results of low-32-bits and high 32-bits of
// respective long elements.
GenTree* tmp =
gtNewSimdCmpOpNode(op, type, op1, op2, CORINFO_TYPE_INT, simdSize, isSimdAsHWIntrinsic);
tmp = impCloneExpr(tmp, &op1, clsHnd, (unsigned)CHECK_SPILL_ALL,
nullptr DEBUGARG("Clone tmp for vector Equals"));
op2 = gtNewSimdHWIntrinsicNode(type, tmp, gtNewIconNode(SHUFFLE_ZWXY), NI_SSE2_Shuffle,
CORINFO_TYPE_INT, simdSize, isSimdAsHWIntrinsic);
return gtNewSimdBinOpNode(GT_AND, type, op1, op2, simdBaseJitType, simdSize, isSimdAsHWIntrinsic);
}
}
else
{
intrinsic = NI_SSE2_CompareEqual;
}
break;
}
case GT_GE:
{
if (simdSize == 32)
{
assert(compIsaSupportedDebugOnly(InstructionSet_AVX));
if (varTypeIsFloating(simdBaseType))
{
intrinsic = NI_AVX_CompareGreaterThanOrEqual;
}
}
else if (simdBaseType == TYP_FLOAT)
{
intrinsic = NI_SSE_CompareGreaterThanOrEqual;
}
else if (simdBaseType == TYP_DOUBLE)
{
intrinsic = NI_SSE2_CompareGreaterThanOrEqual;
}
if (intrinsic == NI_Illegal)
{
// There is no direct support for doing a combined comparison and equality for integral types.
// These have to be implemented by performing both halves and combining their results.
//
// op1Dup = op1
// op2Dup = op2
//
// op1 = GreaterThan(op1, op2)
// op2 = Equals(op1Dup, op2Dup)
//
// result = BitwiseOr(op1, op2)
GenTree* op1Dup;
op1 = impCloneExpr(op1, &op1Dup, clsHnd, (unsigned)CHECK_SPILL_ALL,
nullptr DEBUGARG("Clone op1 for vector GreaterThanOrEqual"));
GenTree* op2Dup;
op2 = impCloneExpr(op2, &op2Dup, clsHnd, (unsigned)CHECK_SPILL_ALL,
nullptr DEBUGARG("Clone op2 for vector GreaterThanOrEqual"));
op1 = gtNewSimdCmpOpNode(GT_GT, type, op1, op2, simdBaseJitType, simdSize, isSimdAsHWIntrinsic);
op2 = gtNewSimdCmpOpNode(GT_EQ, type, op1Dup, op2Dup, simdBaseJitType, simdSize, isSimdAsHWIntrinsic);
return gtNewSimdBinOpNode(GT_OR, type, op1, op2, simdBaseJitType, simdSize, isSimdAsHWIntrinsic);
}
break;
}
case GT_GT:
{
if (varTypeIsUnsigned(simdBaseType))
{
// Vector of byte, ushort, uint and ulong:
// Hardware supports > for signed comparison. Therefore, to use it for
// comparing unsigned numbers, we subtract a constant from both the
// operands such that the result fits within the corresponding signed
// type. The resulting signed numbers are compared using signed comparison.
//
// Vector of byte: constant to be subtracted is 2^7
// Vector of ushort: constant to be subtracted is 2^15
// Vector of uint: constant to be subtracted is 2^31
// Vector of ulong: constant to be subtracted is 2^63
//
// We need to treat op1 and op2 as signed for comparison purposes after
// the transformation.
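// For example, for TYP_UBYTE the values 0xFF (255) and 0x01 become 0x7F (127) and
// 0x81 (-127) after subtracting 0x80, so the signed comparison 127 > -127 preserves
// the original unsigned ordering 255 > 1.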
GenTree* constVal = nullptr;
CorInfoType opJitType = simdBaseJitType;
var_types opType = simdBaseType;
CorInfoType constValJitType = CORINFO_TYPE_INT;
switch (simdBaseType)
{
case TYP_UBYTE:
{
constVal = gtNewIconNode(0x80808080);
simdBaseJitType = CORINFO_TYPE_BYTE;
simdBaseType = TYP_BYTE;
break;
}
case TYP_USHORT:
{
constVal = gtNewIconNode(0x80008000);
simdBaseJitType = CORINFO_TYPE_SHORT;
simdBaseType = TYP_SHORT;
break;
}
case TYP_UINT:
{
constVal = gtNewIconNode(0x80000000);
simdBaseJitType = CORINFO_TYPE_INT;
simdBaseType = TYP_INT;
break;
}
case TYP_ULONG:
{
constVal = gtNewLconNode(0x8000000000000000);
constValJitType = CORINFO_TYPE_LONG;
simdBaseJitType = CORINFO_TYPE_LONG;
simdBaseType = TYP_LONG;
break;
}
default:
{
unreached();
}
}
GenTree* constVector =
gtNewSimdCreateBroadcastNode(type, constVal, constValJitType, simdSize, isSimdAsHWIntrinsic);
GenTree* constVectorDup;
constVector = impCloneExpr(constVector, &constVectorDup, clsHnd, (unsigned)CHECK_SPILL_ALL,
nullptr DEBUGARG("Clone constVector for vector GreaterThan"));
// op1 = op1 - constVector
op1 = gtNewSimdBinOpNode(GT_SUB, type, op1, constVector, opJitType, simdSize, isSimdAsHWIntrinsic);
// op2 = op2 - constVector
op2 = gtNewSimdBinOpNode(GT_SUB, type, op2, constVectorDup, opJitType, simdSize, isSimdAsHWIntrinsic);
}
// simdBaseType should have been remapped to a signed type by the path above
assert(!varTypeIsUnsigned(simdBaseType));
if (simdSize == 32)
{
assert(compIsaSupportedDebugOnly(InstructionSet_AVX));
if (varTypeIsFloating(simdBaseType))
{
intrinsic = NI_AVX_CompareGreaterThan;
}
else
{
assert(compIsaSupportedDebugOnly(InstructionSet_AVX2));
intrinsic = NI_AVX2_CompareGreaterThan;
}
}
else if (simdBaseType == TYP_FLOAT)
{
intrinsic = NI_SSE_CompareGreaterThan;
}
else if (varTypeIsLong(simdBaseType))
{
if (compOpportunisticallyDependsOn(InstructionSet_SSE42))
{
intrinsic = NI_SSE42_CompareGreaterThan;
}
else
{
// There is no direct SSE2 support for comparing TYP_LONG vectors.
// These have to be implemented in terms of TYP_INT vector comparison operations.
//
// Let us consider the case of single long element comparison.
// Say op1 = (x1, y1) and op2 = (x2, y2) where x1, y1, x2, and y2 are 32-bit integers
// that comprise the longs op1 and op2.
//
// GreaterThan(op1, op2) can be expressed in terms of > relationship between 32-bit integers that
// comprise op1 and op2 as
// = (x1, y1) > (x2, y2)
// = (x1 > x2) || [(x1 == x2) && (y1 > y2)] - eq (1)
//
// op1Dup1 = op1
// op1Dup2 = op1Dup1
// op2Dup1 = op2
// op2Dup2 = op2Dup1
//
// t = (op1 > op2) - 32-bit signed comparison
// u = (op1Dup1 == op2Dup1) - 32-bit equality comparison
// v = (op1Dup2 > op2Dup2) - 32-bit unsigned comparison
//
// op1 = Shuffle(t, (3, 3, 1, 1)) - This corresponds to (x1 > x2) in eq(1) above
// v = Shuffle(v, (2, 2, 0, 0)) - This corresponds to (y1 > y2) in eq(1) above
// u = Shuffle(u, (3, 3, 1, 1)) - This corresponds to (x1 == x2) in eq(1) above
// op2 = BitwiseAnd(v, u) - This corresponds to [(x1 == x2) && (y1 > y2)] in eq(1) above
//
// result = BitwiseOr(op1, op2)
GenTree* op1Dup1;
op1 = impCloneExpr(op1, &op1Dup1, clsHnd, (unsigned)CHECK_SPILL_ALL,
nullptr DEBUGARG("Clone op1 for vector GreaterThan"));
GenTree* op1Dup2;
op1Dup1 = impCloneExpr(op1Dup1, &op1Dup2, clsHnd, (unsigned)CHECK_SPILL_ALL,
nullptr DEBUGARG("Clone op1 for vector GreaterThan"));
GenTree* op2Dup1;
op2 = impCloneExpr(op2, &op2Dup1, clsHnd, (unsigned)CHECK_SPILL_ALL,
nullptr DEBUGARG("Clone op2 for vector GreaterThan"));
GenTree* op2Dup2;
op2Dup1 = impCloneExpr(op2Dup1, &op2Dup2, clsHnd, (unsigned)CHECK_SPILL_ALL,
nullptr DEBUGARG("Clone op2 vector GreaterThan"));
GenTree* t =
gtNewSimdCmpOpNode(op, type, op1, op2, CORINFO_TYPE_INT, simdSize, isSimdAsHWIntrinsic);
GenTree* u = gtNewSimdCmpOpNode(GT_EQ, type, op1Dup1, op2Dup1, CORINFO_TYPE_INT, simdSize,
isSimdAsHWIntrinsic);
GenTree* v = gtNewSimdCmpOpNode(op, type, op1Dup2, op2Dup2, CORINFO_TYPE_UINT, simdSize,
isSimdAsHWIntrinsic);
op1 = gtNewSimdHWIntrinsicNode(type, t, gtNewIconNode(SHUFFLE_WWYY, TYP_INT), NI_SSE2_Shuffle,
CORINFO_TYPE_INT, simdSize, isSimdAsHWIntrinsic);
v = gtNewSimdHWIntrinsicNode(type, v, gtNewIconNode(SHUFFLE_ZZXX, TYP_INT), NI_SSE2_Shuffle,
CORINFO_TYPE_INT, simdSize, isSimdAsHWIntrinsic);
u = gtNewSimdHWIntrinsicNode(type, u, gtNewIconNode(SHUFFLE_WWYY, TYP_INT), NI_SSE2_Shuffle,
CORINFO_TYPE_INT, simdSize, isSimdAsHWIntrinsic);
op2 = gtNewSimdBinOpNode(GT_AND, type, v, u, simdBaseJitType, simdSize, isSimdAsHWIntrinsic);
return gtNewSimdBinOpNode(GT_OR, type, op1, op2, simdBaseJitType, simdSize, isSimdAsHWIntrinsic);
}
}
else
{
intrinsic = NI_SSE2_CompareGreaterThan;
}
break;
}
case GT_LE:
{
if (simdSize == 32)
{
assert(compIsaSupportedDebugOnly(InstructionSet_AVX));
if (varTypeIsFloating(simdBaseType))
{
intrinsic = NI_AVX_CompareLessThanOrEqual;
}
}
else if (simdBaseType == TYP_FLOAT)
{
intrinsic = NI_SSE_CompareLessThanOrEqual;
}
else if (simdBaseType == TYP_DOUBLE)
{
intrinsic = NI_SSE2_CompareLessThanOrEqual;
}
if (intrinsic == NI_Illegal)
{
// There is no direct support for doing a combined comparison and equality for integral types.
// These have to be implemented by performing both halves and combining their results.
//
// op1Dup = op1
// op2Dup = op2
//
// op1 = LessThan(op1, op2)
// op2 = Equals(op1Dup, op2Dup)
//
// result = BitwiseOr(op1, op2)
GenTree* op1Dup;
op1 = impCloneExpr(op1, &op1Dup, clsHnd, (unsigned)CHECK_SPILL_ALL,
nullptr DEBUGARG("Clone op1 for vector LessThanOrEqual"));
GenTree* op2Dup;
op2 = impCloneExpr(op2, &op2Dup, clsHnd, (unsigned)CHECK_SPILL_ALL,
nullptr DEBUGARG("Clone op2 for vector LessThanOrEqual"));
op1 = gtNewSimdCmpOpNode(GT_LT, type, op1, op2, simdBaseJitType, simdSize, isSimdAsHWIntrinsic);
op2 = gtNewSimdCmpOpNode(GT_EQ, type, op1Dup, op2Dup, simdBaseJitType, simdSize, isSimdAsHWIntrinsic);
return gtNewSimdBinOpNode(GT_OR, type, op1, op2, simdBaseJitType, simdSize, isSimdAsHWIntrinsic);
}
break;
}
case GT_LT:
{
if (varTypeIsUnsigned(simdBaseType))
{
// Vector of byte, ushort, uint and ulong:
// Hardware supports < for signed comparison. Therefore, to use it for
// comparing unsigned numbers, we subtract a constant from both the
// operands such that the result fits within the corresponding signed
// type. The resulting signed numbers are compared using signed comparison.
//
// Vector of byte: constant to be subtracted is 2^7
// Vector of ushort: constant to be subtracted is 2^15
// Vector of uint: constant to be subtracted is 2^31
// Vector of ulong: constant to be subtracted is 2^63
//
// We need to treat op1 and op2 as signed for comparison purposes after
// the transformation.
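// (See the worked TYP_UBYTE example in the GT_GT case above.)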
GenTree* constVal = nullptr;
CorInfoType opJitType = simdBaseJitType;
var_types opType = simdBaseType;
CorInfoType constValJitType = CORINFO_TYPE_INT;
switch (simdBaseType)
{
case TYP_UBYTE:
{
constVal = gtNewIconNode(0x80808080);
simdBaseJitType = CORINFO_TYPE_BYTE;
simdBaseType = TYP_BYTE;
break;
}
case TYP_USHORT:
{
constVal = gtNewIconNode(0x80008000);
simdBaseJitType = CORINFO_TYPE_SHORT;
simdBaseType = TYP_SHORT;
break;
}
case TYP_UINT:
{
constVal = gtNewIconNode(0x80000000);
simdBaseJitType = CORINFO_TYPE_INT;
simdBaseType = TYP_INT;
break;
}
case TYP_ULONG:
{
constVal = gtNewLconNode(0x8000000000000000);
constValJitType = CORINFO_TYPE_LONG;
simdBaseJitType = CORINFO_TYPE_LONG;
simdBaseType = TYP_LONG;
break;
}
default:
{
unreached();
}
}
GenTree* constVector =
gtNewSimdCreateBroadcastNode(type, constVal, constValJitType, simdSize, isSimdAsHWIntrinsic);
GenTree* constVectorDup;
constVector = impCloneExpr(constVector, &constVectorDup, clsHnd, (unsigned)CHECK_SPILL_ALL,
nullptr DEBUGARG("Clone constVector for vector LessThan"));
// op1 = op1 - constVector
op1 = gtNewSimdBinOpNode(GT_SUB, type, op1, constVector, opJitType, simdSize, isSimdAsHWIntrinsic);
// op2 = op2 - constVector
op2 = gtNewSimdBinOpNode(GT_SUB, type, op2, constVectorDup, opJitType, simdSize, isSimdAsHWIntrinsic);
}
// simdBaseType should have been remapped to a signed type by the path above
assert(!varTypeIsUnsigned(simdBaseType));
if (simdSize == 32)
{
assert(compIsaSupportedDebugOnly(InstructionSet_AVX));
if (varTypeIsFloating(simdBaseType))
{
intrinsic = NI_AVX_CompareLessThan;
}
else
{
assert(compIsaSupportedDebugOnly(InstructionSet_AVX2));
intrinsic = NI_AVX2_CompareLessThan;
}
}
else if (simdBaseType == TYP_FLOAT)
{
intrinsic = NI_SSE_CompareLessThan;
}
else if (varTypeIsLong(simdBaseType))
{
if (compOpportunisticallyDependsOn(InstructionSet_SSE42))
{
intrinsic = NI_SSE42_CompareLessThan;
}
else
{
// There is no direct SSE2 support for comparing TYP_LONG vectors.
// These have to be implemented in terms of TYP_INT vector comparison operations.
//
// Let us consider the case of single long element comparison.
// Say op1 = (x1, y1) and op2 = (x2, y2) where x1, y1, x2, and y2 are 32-bit integers
// that comprise the longs op1 and op2.
//
// LessThan(op1, op2) can be expressed in terms of the < relationship between the
// 32-bit integers that comprise op1 and op2 as
// = (x1, y1) < (x2, y2)
// = (x1 < x2) || [(x1 == x2) && (y1 < y2)] - eq (1)
//
// op1Dup1 = op1
// op1Dup2 = op1Dup1
// op2Dup1 = op2
// op2Dup2 = op2Dup1
//
// t = (op1 < op2) - 32-bit signed comparison
// u = (op1Dup1 == op2Dup1) - 32-bit equality comparison
// v = (op1Dup2 < op2Dup2) - 32-bit unsigned comparison
//
// op1 = Shuffle(t, (3, 3, 1, 1)) - This corresponds to (x1 < x2) in eq(1) above
// v = Shuffle(v, (2, 2, 0, 0)) - This corresponds to (y1 < y2) in eq(1) above
// u = Shuffle(u, (3, 3, 1, 1)) - This corresponds to (x1 == x2) in eq(1) above
// op2 = BitwiseAnd(v, u) - This corresponds to [(x1 == x2) && (y1 < y2)] in eq(1) above
//
// result = BitwiseOr(op1, op2)
GenTree* op1Dup1;
op1 = impCloneExpr(op1, &op1Dup1, clsHnd, (unsigned)CHECK_SPILL_ALL,
nullptr DEBUGARG("Clone op1 for vector LessThan"));
GenTree* op1Dup2;
op1Dup1 = impCloneExpr(op1Dup1, &op1Dup2, clsHnd, (unsigned)CHECK_SPILL_ALL,
nullptr DEBUGARG("Clone op1 for vector LessThan"));
GenTree* op2Dup1;
op2 = impCloneExpr(op2, &op2Dup1, clsHnd, (unsigned)CHECK_SPILL_ALL,
nullptr DEBUGARG("Clone op2 for vector LessThan"));
GenTree* op2Dup2;
op2Dup1 = impCloneExpr(op2Dup1, &op2Dup2, clsHnd, (unsigned)CHECK_SPILL_ALL,
nullptr DEBUGARG("Clone op2 vector LessThan"));
GenTree* t =
gtNewSimdCmpOpNode(op, type, op1, op2, CORINFO_TYPE_INT, simdSize, isSimdAsHWIntrinsic);
GenTree* u = gtNewSimdCmpOpNode(GT_EQ, type, op1Dup1, op2Dup1, CORINFO_TYPE_INT, simdSize,
isSimdAsHWIntrinsic);
GenTree* v = gtNewSimdCmpOpNode(op, type, op1Dup2, op2Dup2, CORINFO_TYPE_UINT, simdSize,
isSimdAsHWIntrinsic);
op1 = gtNewSimdHWIntrinsicNode(type, t, gtNewIconNode(SHUFFLE_WWYY, TYP_INT), NI_SSE2_Shuffle,
CORINFO_TYPE_INT, simdSize, isSimdAsHWIntrinsic);
v = gtNewSimdHWIntrinsicNode(type, v, gtNewIconNode(SHUFFLE_ZZXX, TYP_INT), NI_SSE2_Shuffle,
CORINFO_TYPE_INT, simdSize, isSimdAsHWIntrinsic);
u = gtNewSimdHWIntrinsicNode(type, u, gtNewIconNode(SHUFFLE_WWYY, TYP_INT), NI_SSE2_Shuffle,
CORINFO_TYPE_INT, simdSize, isSimdAsHWIntrinsic);
op2 = gtNewSimdBinOpNode(GT_AND, type, v, u, simdBaseJitType, simdSize, isSimdAsHWIntrinsic);
return gtNewSimdBinOpNode(GT_OR, type, op1, op2, simdBaseJitType, simdSize, isSimdAsHWIntrinsic);
}
}
else
{
intrinsic = NI_SSE2_CompareLessThan;
}
break;
}
#elif defined(TARGET_ARM64)
case GT_EQ:
{
if ((varTypeIsLong(simdBaseType) || (simdBaseType == TYP_DOUBLE)))
{
intrinsic = (simdSize == 8) ? NI_AdvSimd_Arm64_CompareEqualScalar : NI_AdvSimd_Arm64_CompareEqual;
}
else
{
intrinsic = NI_AdvSimd_CompareEqual;
}
break;
}
case GT_GE:
{
if ((varTypeIsLong(simdBaseType) || (simdBaseType == TYP_DOUBLE)))
{
intrinsic = (simdSize == 8) ? NI_AdvSimd_Arm64_CompareGreaterThanOrEqualScalar
: NI_AdvSimd_Arm64_CompareGreaterThanOrEqual;
}
else
{
intrinsic = NI_AdvSimd_CompareGreaterThanOrEqual;
}
break;
}
case GT_GT:
{
if ((varTypeIsLong(simdBaseType) || (simdBaseType == TYP_DOUBLE)))
{
intrinsic =
(simdSize == 8) ? NI_AdvSimd_Arm64_CompareGreaterThanScalar : NI_AdvSimd_Arm64_CompareGreaterThan;
}
else
{
intrinsic = NI_AdvSimd_CompareGreaterThan;
}
break;
}
case GT_LE:
{
if ((varTypeIsLong(simdBaseType) || (simdBaseType == TYP_DOUBLE)))
{
intrinsic = (simdSize == 8) ? NI_AdvSimd_Arm64_CompareLessThanOrEqualScalar
: NI_AdvSimd_Arm64_CompareLessThanOrEqual;
}
else
{
intrinsic = NI_AdvSimd_CompareLessThanOrEqual;
}
break;
}
case GT_LT:
{
if ((varTypeIsLong(simdBaseType) || (simdBaseType == TYP_DOUBLE)))
{
intrinsic = (simdSize == 8) ? NI_AdvSimd_Arm64_CompareLessThanScalar : NI_AdvSimd_Arm64_CompareLessThan;
}
else
{
intrinsic = NI_AdvSimd_CompareLessThan;
}
break;
}
#else
#error Unsupported platform
#endif // !TARGET_XARCH && !TARGET_ARM64
default:
{
unreached();
}
}
assert(intrinsic != NI_Illegal);
return gtNewSimdHWIntrinsicNode(type, op1, op2, intrinsic, simdBaseJitType, simdSize, isSimdAsHWIntrinsic);
}
GenTree* Compiler::gtNewSimdCmpOpAllNode(genTreeOps op,
var_types type,
GenTree* op1,
GenTree* op2,
CorInfoType simdBaseJitType,
unsigned simdSize,
bool isSimdAsHWIntrinsic)
{
assert(IsBaselineSimdIsaSupportedDebugOnly());
assert(type == TYP_BOOL);
var_types simdType = getSIMDTypeForSize(simdSize);
assert(varTypeIsSIMD(simdType));
assert(op1 != nullptr);
assert(op1->TypeIs(simdType));
assert(op2 != nullptr);
assert(op2->TypeIs(simdType));
var_types simdBaseType = JitType2PreciseVarType(simdBaseJitType);
assert(varTypeIsArithmetic(simdBaseType));
NamedIntrinsic intrinsic = NI_Illegal;
switch (op)
{
#if defined(TARGET_XARCH)
case GT_EQ:
{
if (simdSize == 32)
{
assert(compIsaSupportedDebugOnly(InstructionSet_AVX));
assert(varTypeIsFloating(simdBaseType) || compIsaSupportedDebugOnly(InstructionSet_AVX2));
intrinsic = NI_Vector256_op_Equality;
}
else
{
intrinsic = NI_Vector128_op_Equality;
}
break;
}
case GT_GE:
case GT_GT:
case GT_LE:
case GT_LT:
{
// We want to generate a comparison along the lines of
// GT_XX(op1, op2).As<T, TInteger>() == Vector128<TInteger>.AllBitsSet
NamedIntrinsic getAllBitsSet = NI_Illegal;
if (simdSize == 32)
{
// TODO-XArch-CQ: It's a non-trivial amount of work to support these
// for floating-point while only utilizing AVX. It would require, among
// other things, inverting the comparison and potentially support for a
// new Avx.TestNotZ intrinsic to ensure the codegen remains efficient.
assert(compIsaSupportedDebugOnly(InstructionSet_AVX2));
intrinsic = NI_Vector256_op_Equality;
getAllBitsSet = NI_Vector256_get_AllBitsSet;
}
else
{
intrinsic = NI_Vector128_op_Equality;
getAllBitsSet = NI_Vector128_get_AllBitsSet;
}
op1 = gtNewSimdCmpOpNode(op, simdType, op1, op2, simdBaseJitType, simdSize,
/* isSimdAsHWIntrinsic */ false);
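// The comparison above yields an all-bits-set or zero mask per element, so reinterpret
// floating-point elements as integers before testing against AllBitsSet: an all-bits-set
// pattern is a NaN and would compare unequal under floating-point equality.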
if (simdBaseType == TYP_FLOAT)
{
simdBaseType = TYP_INT;
simdBaseJitType = CORINFO_TYPE_INT;
}
else if (simdBaseType == TYP_DOUBLE)
{
simdBaseType = TYP_LONG;
simdBaseJitType = CORINFO_TYPE_LONG;
}
op2 = gtNewSimdHWIntrinsicNode(simdType, getAllBitsSet, simdBaseJitType, simdSize);
break;
}
#elif defined(TARGET_ARM64)
case GT_EQ:
{
intrinsic = (simdSize == 8) ? NI_Vector64_op_Equality : NI_Vector128_op_Equality;
break;
}
case GT_GE:
case GT_GT:
case GT_LE:
case GT_LT:
{
// We want to generate a comparison along the lines of
// GT_XX(op1, op2).As<T, TInteger>() == Vector128<TInteger>.AllBitsSet
NamedIntrinsic getAllBitsSet = NI_Illegal;
if (simdSize == 8)
{
intrinsic = NI_Vector64_op_Equality;
getAllBitsSet = NI_Vector64_get_AllBitsSet;
}
else
{
intrinsic = NI_Vector128_op_Equality;
getAllBitsSet = NI_Vector128_get_AllBitsSet;
}
op1 = gtNewSimdCmpOpNode(op, simdType, op1, op2, simdBaseJitType, simdSize,
/* isSimdAsHWIntrinsic */ false);
if (simdBaseType == TYP_FLOAT)
{
simdBaseType = TYP_INT;
simdBaseJitType = CORINFO_TYPE_INT;
}
else if (simdBaseType == TYP_DOUBLE)
{
simdBaseType = TYP_LONG;
simdBaseJitType = CORINFO_TYPE_LONG;
}
op2 = gtNewSimdHWIntrinsicNode(simdType, getAllBitsSet, simdBaseJitType, simdSize);
break;
}
#else
#error Unsupported platform
#endif // !TARGET_XARCH && !TARGET_ARM64
default:
{
unreached();
}
}
assert(intrinsic != NI_Illegal);
return gtNewSimdHWIntrinsicNode(type, op1, op2, intrinsic, simdBaseJitType, simdSize, isSimdAsHWIntrinsic);
}
GenTree* Compiler::gtNewSimdCmpOpAnyNode(genTreeOps op,
var_types type,
GenTree* op1,
GenTree* op2,
CorInfoType simdBaseJitType,
unsigned simdSize,
bool isSimdAsHWIntrinsic)
{
assert(IsBaselineSimdIsaSupportedDebugOnly());
assert(type == TYP_BOOL);
var_types simdType = getSIMDTypeForSize(simdSize);
assert(varTypeIsSIMD(simdType));
assert(op1 != nullptr);
assert(op1->TypeIs(simdType));
assert(op2 != nullptr);
assert(op2->TypeIs(simdType));
var_types simdBaseType = JitType2PreciseVarType(simdBaseJitType);
assert(varTypeIsArithmetic(simdBaseType));
NamedIntrinsic intrinsic = NI_Illegal;
switch (op)
{
#if defined(TARGET_XARCH)
case GT_EQ:
case GT_GE:
case GT_GT:
case GT_LE:
case GT_LT:
{
// We want to generate a comparison along the lines of
// GT_XX(op1, op2).As<T, TInteger>() != Vector128<TInteger>.Zero
if (simdSize == 32)
{
// TODO-XArch-CQ: It's a non-trivial amount of work to support these
// for floating-point while only utilizing AVX. It would require, among
// other things, inverting the comparison and potentially support for a
// new Avx.TestNotZ intrinsic to ensure the codegen remains efficient.
assert(compIsaSupportedDebugOnly(InstructionSet_AVX2));
intrinsic = NI_Vector256_op_Inequality;
}
else
{
intrinsic = NI_Vector128_op_Inequality;
}
op1 = gtNewSimdCmpOpNode(op, simdType, op1, op2, simdBaseJitType, simdSize,
/* isSimdAsHWIntrinsic */ false);
if (simdBaseType == TYP_FLOAT)
{
simdBaseType = TYP_INT;
simdBaseJitType = CORINFO_TYPE_INT;
}
else if (simdBaseType == TYP_DOUBLE)
{
simdBaseType = TYP_LONG;
simdBaseJitType = CORINFO_TYPE_LONG;
}
op2 = gtNewSimdZeroNode(simdType, simdBaseJitType, simdSize, /* isSimdAsHWIntrinsic */ false);
break;
}
case GT_NE:
{
if (simdSize == 32)
{
assert(compIsaSupportedDebugOnly(InstructionSet_AVX));
assert(varTypeIsFloating(simdBaseType) || compIsaSupportedDebugOnly(InstructionSet_AVX2));
intrinsic = NI_Vector256_op_Inequality;
}
else
{
intrinsic = NI_Vector128_op_Inequality;
}
break;
}
#elif defined(TARGET_ARM64)
case GT_EQ:
case GT_GE:
case GT_GT:
case GT_LE:
case GT_LT:
{
// We want to generate a comparison along the lines of
// GT_XX(op1, op2).As<T, TInteger>() != Vector128<TInteger>.Zero
intrinsic = (simdSize == 8) ? NI_Vector64_op_Inequality : NI_Vector128_op_Inequality;
op1 = gtNewSimdCmpOpNode(op, simdType, op1, op2, simdBaseJitType, simdSize,
/* isSimdAsHWIntrinsic */ false);
if (simdBaseType == TYP_FLOAT)
{
simdBaseType = TYP_INT;
simdBaseJitType = CORINFO_TYPE_INT;
}
else if (simdBaseType == TYP_DOUBLE)
{
simdBaseType = TYP_LONG;
simdBaseJitType = CORINFO_TYPE_LONG;
}
op2 = gtNewSimdZeroNode(simdType, simdBaseJitType, simdSize, /* isSimdAsHWIntrinsic */ false);
break;
}
case GT_NE:
{
intrinsic = (simdSize == 8) ? NI_Vector64_op_Inequality : NI_Vector128_op_Inequality;
break;
}
#else
#error Unsupported platform
#endif // !TARGET_XARCH && !TARGET_ARM64
default:
{
unreached();
}
}
assert(intrinsic != NI_Illegal);
return gtNewSimdHWIntrinsicNode(type, op1, op2, intrinsic, simdBaseJitType, simdSize, isSimdAsHWIntrinsic);
}
GenTree* Compiler::gtNewSimdCndSelNode(var_types type,
GenTree* op1,
GenTree* op2,
GenTree* op3,
CorInfoType simdBaseJitType,
unsigned simdSize,
bool isSimdAsHWIntrinsic)
{
assert(IsBaselineSimdIsaSupportedDebugOnly());
assert(varTypeIsSIMD(type));
assert(getSIMDTypeForSize(simdSize) == type);
assert(op1 != nullptr);
assert(op1->TypeIs(type));
assert(op2 != nullptr);
assert(op2->TypeIs(type));
assert(op3 != nullptr);
assert(op3->TypeIs(type));
var_types simdBaseType = JitType2PreciseVarType(simdBaseJitType);
assert(varTypeIsArithmetic(simdBaseType));
NamedIntrinsic intrinsic = NI_Illegal;
#if defined(TARGET_XARCH)
// TODO-XARCH-CQ: It's likely beneficial to have a dedicated CndSel node so we
// can special case when the condition is the result of various compare operations.
//
// When it is, the condition is AllBitsSet or Zero on a per-element basis and we
// could change this to be a Blend operation in lowering as an optimization.
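// Emulate the per-element select bitwise: result = (op1 & op2) | (~op1 & op3),
// where op1 is the mask.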
assert((simdSize != 32) || compIsaSupportedDebugOnly(InstructionSet_AVX));
CORINFO_CLASS_HANDLE clsHnd = gtGetStructHandleForSIMD(type, simdBaseJitType);
GenTree* op1Dup;
op1 = impCloneExpr(op1, &op1Dup, clsHnd, (unsigned)CHECK_SPILL_ALL,
nullptr DEBUGARG("Clone op1 for vector conditional select"));
// op2 = op2 & op1
op2 = gtNewSimdBinOpNode(GT_AND, type, op2, op1, simdBaseJitType, simdSize, isSimdAsHWIntrinsic);
// op3 = op3 & ~op1Dup
op3 = gtNewSimdBinOpNode(GT_AND_NOT, type, op3, op1Dup, simdBaseJitType, simdSize, isSimdAsHWIntrinsic);
// result = op2 | op3
return gtNewSimdBinOpNode(GT_OR, type, op2, op3, simdBaseJitType, simdSize, isSimdAsHWIntrinsic);
#elif defined(TARGET_ARM64)
return gtNewSimdHWIntrinsicNode(type, op1, op2, op3, NI_AdvSimd_BitwiseSelect, simdBaseJitType, simdSize,
isSimdAsHWIntrinsic);
#else
#error Unsupported platform
#endif // !TARGET_XARCH && !TARGET_ARM64
}
GenTree* Compiler::gtNewSimdCreateBroadcastNode(
var_types type, GenTree* op1, CorInfoType simdBaseJitType, unsigned simdSize, bool isSimdAsHWIntrinsic)
{
NamedIntrinsic hwIntrinsicID = NI_Vector128_Create;
var_types simdBaseType = JitType2PreciseVarType(simdBaseJitType);
#if defined(TARGET_XARCH)
#if defined(TARGET_X86)
if (varTypeIsLong(simdBaseType) && !op1->IsIntegralConst())
{
// TODO-XARCH-CQ: It may be beneficial to emit the movq
// instruction, which takes a 64-bit memory address and
// works on 32-bit x86 systems.
unreached();
}
#endif // TARGET_X86
if (simdSize == 32)
{
hwIntrinsicID = NI_Vector256_Create;
}
#elif defined(TARGET_ARM64)
if (simdSize == 8)
{
hwIntrinsicID = NI_Vector64_Create;
}
#else
#error Unsupported platform
#endif // !TARGET_XARCH && !TARGET_ARM64
return gtNewSimdHWIntrinsicNode(type, op1, hwIntrinsicID, simdBaseJitType, simdSize, isSimdAsHWIntrinsic);
}
GenTree* Compiler::gtNewSimdDotProdNode(var_types type,
GenTree* op1,
GenTree* op2,
CorInfoType simdBaseJitType,
unsigned simdSize,
bool isSimdAsHWIntrinsic)
{
assert(IsBaselineSimdIsaSupportedDebugOnly());
assert(varTypeIsArithmetic(type));
var_types simdType = getSIMDTypeForSize(simdSize);
assert(varTypeIsSIMD(simdType));
assert(op1 != nullptr);
assert(op1->TypeIs(simdType));
assert(op2 != nullptr);
assert(op2->TypeIs(simdType));
var_types simdBaseType = JitType2PreciseVarType(simdBaseJitType);
assert(JITtype2varType(simdBaseJitType) == type);
NamedIntrinsic intrinsic = NI_Illegal;
#if defined(TARGET_XARCH)
assert(!varTypeIsByte(simdBaseType) && !varTypeIsLong(simdBaseType));
if (simdSize == 32)
{
assert(varTypeIsFloating(simdBaseType) || compIsaSupportedDebugOnly(InstructionSet_AVX2));
intrinsic = NI_Vector256_Dot;
}
else
{
assert(((simdBaseType != TYP_INT) && (simdBaseType != TYP_UINT)) ||
compIsaSupportedDebugOnly(InstructionSet_SSE41));
intrinsic = NI_Vector128_Dot;
}
#elif defined(TARGET_ARM64)
assert(!varTypeIsLong(simdBaseType));
intrinsic = (simdSize == 8) ? NI_Vector64_Dot : NI_Vector128_Dot;
#else
#error Unsupported platform
#endif // !TARGET_XARCH && !TARGET_ARM64
assert(intrinsic != NI_Illegal);
return gtNewSimdHWIntrinsicNode(type, op1, op2, intrinsic, simdBaseJitType, simdSize, isSimdAsHWIntrinsic);
}
GenTree* Compiler::gtNewSimdFloorNode(
var_types type, GenTree* op1, CorInfoType simdBaseJitType, unsigned simdSize, bool isSimdAsHWIntrinsic)
{
assert(IsBaselineSimdIsaSupportedDebugOnly());
assert(varTypeIsSIMD(type));
assert(getSIMDTypeForSize(simdSize) == type);
assert(op1 != nullptr);
assert(op1->TypeIs(type));
var_types simdBaseType = JitType2PreciseVarType(simdBaseJitType);
assert(varTypeIsFloating(simdBaseType));
NamedIntrinsic intrinsic = NI_Illegal;
#if defined(TARGET_XARCH)
if (simdSize == 32)
{
intrinsic = NI_AVX_Floor;
}
else
{
assert(compIsaSupportedDebugOnly(InstructionSet_SSE41));
intrinsic = NI_SSE41_Floor;
}
#elif defined(TARGET_ARM64)
if (simdBaseType == TYP_DOUBLE)
{
intrinsic = (simdSize == 8) ? NI_AdvSimd_FloorScalar : NI_AdvSimd_Arm64_Floor;
}
else
{
intrinsic = NI_AdvSimd_Floor;
}
#else
#error Unsupported platform
#endif // !TARGET_XARCH && !TARGET_ARM64
assert(intrinsic != NI_Illegal);
return gtNewSimdHWIntrinsicNode(type, op1, intrinsic, simdBaseJitType, simdSize, isSimdAsHWIntrinsic);
}
GenTree* Compiler::gtNewSimdGetElementNode(var_types type,
GenTree* op1,
GenTree* op2,
CorInfoType simdBaseJitType,
unsigned simdSize,
bool isSimdAsHWIntrinsic)
{
NamedIntrinsic intrinsicId = NI_Vector128_GetElement;
var_types simdBaseType = JitType2PreciseVarType(simdBaseJitType);
assert(varTypeIsArithmetic(simdBaseType));
#if defined(TARGET_XARCH)
switch (simdBaseType)
{
// Using software fallback if simdBaseType is not supported by hardware
case TYP_BYTE:
case TYP_UBYTE:
case TYP_INT:
case TYP_UINT:
case TYP_LONG:
case TYP_ULONG:
assert(compIsaSupportedDebugOnly(InstructionSet_SSE41));
break;
case TYP_DOUBLE:
case TYP_FLOAT:
case TYP_SHORT:
case TYP_USHORT:
assert(compIsaSupportedDebugOnly(InstructionSet_SSE2));
break;
default:
unreached();
}
if (simdSize == 32)
{
intrinsicId = NI_Vector256_GetElement;
}
#elif defined(TARGET_ARM64)
if (simdSize == 8)
{
intrinsicId = NI_Vector64_GetElement;
}
#else
#error Unsupported platform
#endif // !TARGET_XARCH && !TARGET_ARM64
int immUpperBound = getSIMDVectorLength(simdSize, simdBaseType) - 1;
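// A bounds check on the index is only needed when it is not a constant already known
// to be within [0, immUpperBound].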
bool rangeCheckNeeded = !op2->OperIsConst();
if (!rangeCheckNeeded)
{
ssize_t imm8 = op2->AsIntCon()->IconValue();
rangeCheckNeeded = (imm8 < 0) || (imm8 > immUpperBound);
}
if (rangeCheckNeeded)
{
op2 = addRangeCheckForHWIntrinsic(op2, 0, immUpperBound);
}
return gtNewSimdHWIntrinsicNode(type, op1, op2, intrinsicId, simdBaseJitType, simdSize, isSimdAsHWIntrinsic);
}
GenTree* Compiler::gtNewSimdMaxNode(var_types type,
GenTree* op1,
GenTree* op2,
CorInfoType simdBaseJitType,
unsigned simdSize,
bool isSimdAsHWIntrinsic)
{
assert(IsBaselineSimdIsaSupportedDebugOnly());
assert(varTypeIsSIMD(type));
assert(getSIMDTypeForSize(simdSize) == type);
assert(op1 != nullptr);
assert(op1->TypeIs(type));
assert(op2 != nullptr);
assert(op2->TypeIs(type));
var_types simdBaseType = JitType2PreciseVarType(simdBaseJitType);
assert(varTypeIsArithmetic(simdBaseType));
NamedIntrinsic intrinsic = NI_Illegal;
CORINFO_CLASS_HANDLE clsHnd = gtGetStructHandleForSIMD(type, simdBaseJitType);
#if defined(TARGET_XARCH)
if (simdSize == 32)
{
assert(compIsaSupportedDebugOnly(InstructionSet_AVX));
if (varTypeIsFloating(simdBaseType))
{
intrinsic = NI_AVX_Max;
}
else
{
assert(compIsaSupportedDebugOnly(InstructionSet_AVX2));
if (!varTypeIsLong(simdBaseType))
{
intrinsic = NI_AVX2_Max;
}
}
}
else
{
switch (simdBaseType)
{
case TYP_BYTE:
case TYP_USHORT:
{
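// SSE2 only has Max for unsigned bytes and signed shorts, so bias signed bytes into the
// unsigned byte range (and unsigned shorts into the signed short range), take the Max
// there, and then undo the bias on the result.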
GenTree* constVal = nullptr;
CorInfoType opJitType = simdBaseJitType;
var_types opType = simdBaseType;
genTreeOps fixupOp1 = GT_NONE;
genTreeOps fixupOp2 = GT_NONE;
switch (simdBaseType)
{
case TYP_BYTE:
{
constVal = gtNewIconNode(0x80808080);
fixupOp1 = GT_SUB;
fixupOp2 = GT_ADD;
simdBaseJitType = CORINFO_TYPE_UBYTE;
simdBaseType = TYP_UBYTE;
break;
}
case TYP_USHORT:
{
constVal = gtNewIconNode(0x80008000);
fixupOp1 = GT_ADD;
fixupOp2 = GT_SUB;
simdBaseJitType = CORINFO_TYPE_SHORT;
simdBaseType = TYP_SHORT;
break;
}
default:
{
unreached();
}
}
assert(constVal != nullptr);
assert(fixupOp1 != GT_NONE);
assert(fixupOp2 != GT_NONE);
assert(opJitType != simdBaseJitType);
assert(opType != simdBaseType);
GenTree* constVector =
gtNewSimdCreateBroadcastNode(type, constVal, CORINFO_TYPE_INT, simdSize, isSimdAsHWIntrinsic);
GenTree* constVectorDup1;
constVector = impCloneExpr(constVector, &constVectorDup1, clsHnd, (unsigned)CHECK_SPILL_ALL,
nullptr DEBUGARG("Clone constVector for vector Max"));
GenTree* constVectorDup2;
constVectorDup1 = impCloneExpr(constVectorDup1, &constVectorDup2, clsHnd, (unsigned)CHECK_SPILL_ALL,
nullptr DEBUGARG("Clone constVector for vector Max"));
// op1 = op1 - constVector
// -or-
// op1 = op1 + constVector
op1 = gtNewSimdBinOpNode(fixupOp1, type, op1, constVector, opJitType, simdSize, isSimdAsHWIntrinsic);
// op2 = op2 - constVectorDup1
// -or-
// op2 = op2 + constVectorDup1
op2 =
gtNewSimdBinOpNode(fixupOp1, type, op2, constVectorDup1, opJitType, simdSize, isSimdAsHWIntrinsic);
// op1 = Max(op1, op2)
op1 = gtNewSimdMaxNode(type, op1, op2, simdBaseJitType, simdSize, isSimdAsHWIntrinsic);
// result = op1 + constVectorDup2
// -or-
// result = op1 - constVectorDup2
return gtNewSimdBinOpNode(fixupOp2, type, op1, constVectorDup2, opJitType, simdSize,
isSimdAsHWIntrinsic);
}
case TYP_INT:
case TYP_UINT:
case TYP_LONG:
case TYP_ULONG:
{
break;
}
case TYP_FLOAT:
{
intrinsic = NI_SSE_Max;
break;
}
case TYP_UBYTE:
case TYP_SHORT:
case TYP_DOUBLE:
{
intrinsic = NI_SSE2_Max;
break;
}
default:
{
unreached();
}
}
}
#elif defined(TARGET_ARM64)
if (!varTypeIsLong(simdBaseType))
{
if (simdBaseType == TYP_DOUBLE)
{
intrinsic = (simdSize == 8) ? NI_AdvSimd_Arm64_MaxScalar : NI_AdvSimd_Arm64_Max;
}
else
{
intrinsic = NI_AdvSimd_Max;
}
}
#else
#error Unsupported platform
#endif // !TARGET_XARCH && !TARGET_ARM64
if (intrinsic != NI_Illegal)
{
return gtNewSimdHWIntrinsicNode(type, op1, op2, intrinsic, simdBaseJitType, simdSize, isSimdAsHWIntrinsic);
}
GenTree* op1Dup;
op1 = impCloneExpr(op1, &op1Dup, clsHnd, (unsigned)CHECK_SPILL_ALL, nullptr DEBUGARG("Clone op1 for vector Max"));
GenTree* op2Dup;
op2 = impCloneExpr(op2, &op2Dup, clsHnd, (unsigned)CHECK_SPILL_ALL, nullptr DEBUGARG("Clone op2 for vector Max"));
// op1 = op1 > op2
op1 = gtNewSimdCmpOpNode(GT_GT, type, op1, op2, simdBaseJitType, simdSize, isSimdAsHWIntrinsic);
// result = ConditionalSelect(op1, op1Dup, op2Dup)
return gtNewSimdCndSelNode(type, op1, op1Dup, op2Dup, simdBaseJitType, simdSize, isSimdAsHWIntrinsic);
}
GenTree* Compiler::gtNewSimdMinNode(var_types type,
GenTree* op1,
GenTree* op2,
CorInfoType simdBaseJitType,
unsigned simdSize,
bool isSimdAsHWIntrinsic)
{
assert(IsBaselineSimdIsaSupportedDebugOnly());
assert(varTypeIsSIMD(type));
assert(getSIMDTypeForSize(simdSize) == type);
assert(op1 != nullptr);
assert(op1->TypeIs(type));
assert(op2 != nullptr);
assert(op2->TypeIs(type));
var_types simdBaseType = JitType2PreciseVarType(simdBaseJitType);
assert(varTypeIsArithmetic(simdBaseType));
NamedIntrinsic intrinsic = NI_Illegal;
CORINFO_CLASS_HANDLE clsHnd = gtGetStructHandleForSIMD(type, simdBaseJitType);
#if defined(TARGET_XARCH)
if (simdSize == 32)
{
assert(compIsaSupportedDebugOnly(InstructionSet_AVX));
if (varTypeIsFloating(simdBaseType))
{
intrinsic = NI_AVX_Min;
}
else
{
assert(compIsaSupportedDebugOnly(InstructionSet_AVX2));
if (!varTypeIsLong(simdBaseType))
{
intrinsic = NI_AVX2_Min;
}
}
}
else
{
switch (simdBaseType)
{
case TYP_BYTE:
case TYP_USHORT:
{
GenTree* constVal = nullptr;
CorInfoType opJitType = simdBaseJitType;
var_types opType = simdBaseType;
genTreeOps fixupOp1 = GT_NONE;
genTreeOps fixupOp2 = GT_NONE;
switch (simdBaseType)
{
case TYP_BYTE:
{
constVal = gtNewIconNode(0x80808080);
fixupOp1 = GT_SUB;
fixupOp2 = GT_ADD;
simdBaseJitType = CORINFO_TYPE_UBYTE;
simdBaseType = TYP_UBYTE;
break;
}
case TYP_USHORT:
{
constVal = gtNewIconNode(0x80008000);
fixupOp1 = GT_ADD;
fixupOp2 = GT_SUB;
simdBaseJitType = CORINFO_TYPE_SHORT;
simdBaseType = TYP_SHORT;
break;
}
default:
{
unreached();
}
}
assert(constVal != nullptr);
assert(fixupOp1 != GT_NONE);
assert(fixupOp2 != GT_NONE);
assert(opJitType != simdBaseJitType);
assert(opType != simdBaseType);
GenTree* constVector =
gtNewSimdCreateBroadcastNode(type, constVal, CORINFO_TYPE_INT, simdSize, isSimdAsHWIntrinsic);
GenTree* constVectorDup1;
constVector = impCloneExpr(constVector, &constVectorDup1, clsHnd, (unsigned)CHECK_SPILL_ALL,
nullptr DEBUGARG("Clone constVector for vector Min"));
GenTree* constVectorDup2;
constVectorDup1 = impCloneExpr(constVectorDup1, &constVectorDup2, clsHnd, (unsigned)CHECK_SPILL_ALL,
nullptr DEBUGARG("Clone constVector for vector Min"));
// op1 = op1 - constVector
// -or-
// op1 = op1 + constVector
op1 = gtNewSimdBinOpNode(fixupOp1, type, op1, constVector, opJitType, simdSize, isSimdAsHWIntrinsic);
// op2 = op2 - constVectorDup1
// -or-
// op2 = op2 + constVectorDup1
op2 =
gtNewSimdBinOpNode(fixupOp1, type, op2, constVectorDup1, opJitType, simdSize, isSimdAsHWIntrinsic);
// op1 = Min(op1, op2)
op1 = gtNewSimdMinNode(type, op1, op2, simdBaseJitType, simdSize, isSimdAsHWIntrinsic);
// result = op1 + constVectorDup2
// -or-
// result = op1 - constVectorDup2
return gtNewSimdBinOpNode(fixupOp2, type, op1, constVectorDup2, opJitType, simdSize,
isSimdAsHWIntrinsic);
}
case TYP_INT:
case TYP_UINT:
case TYP_LONG:
case TYP_ULONG:
{
break;
}
case TYP_FLOAT:
{
intrinsic = NI_SSE_Min;
break;
}
case TYP_UBYTE:
case TYP_SHORT:
case TYP_DOUBLE:
{
intrinsic = NI_SSE2_Min;
break;
}
default:
{
unreached();
}
}
}
#elif defined(TARGET_ARM64)
if (!varTypeIsLong(simdBaseType))
{
if (simdBaseType == TYP_DOUBLE)
{
intrinsic = (simdSize == 8) ? NI_AdvSimd_Arm64_MinScalar : NI_AdvSimd_Arm64_Min;
}
else
{
intrinsic = NI_AdvSimd_Min;
}
}
#else
#error Unsupported platform
#endif // !TARGET_XARCH && !TARGET_ARM64
if (intrinsic != NI_Illegal)
{
return gtNewSimdHWIntrinsicNode(type, op1, op2, intrinsic, simdBaseJitType, simdSize, isSimdAsHWIntrinsic);
}
GenTree* op1Dup;
op1 = impCloneExpr(op1, &op1Dup, clsHnd, (unsigned)CHECK_SPILL_ALL, nullptr DEBUGARG("Clone op1 for vector Min"));
GenTree* op2Dup;
op2 = impCloneExpr(op2, &op2Dup, clsHnd, (unsigned)CHECK_SPILL_ALL, nullptr DEBUGARG("Clone op2 for vector Min"));
// op1 = op1 < op2
op1 = gtNewSimdCmpOpNode(GT_LT, type, op1, op2, simdBaseJitType, simdSize, isSimdAsHWIntrinsic);
// result = ConditionalSelect(op1, op1Dup, op2Dup)
return gtNewSimdCndSelNode(type, op1, op1Dup, op2Dup, simdBaseJitType, simdSize, isSimdAsHWIntrinsic);
}
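//------------------------------------------------------------------------
// gtNewSimdNarrowNode: Creates a new SIMD node that narrows the elements of two input
//    vectors into a single vector of half-width elements. op1 supplies the lower half of
//    the result and op2 the upper half; simdBaseJitType describes the narrowed result
//    elements.
//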
GenTree* Compiler::gtNewSimdNarrowNode(var_types type,
GenTree* op1,
GenTree* op2,
CorInfoType simdBaseJitType,
unsigned simdSize,
bool isSimdAsHWIntrinsic)
{
assert(IsBaselineSimdIsaSupportedDebugOnly());
assert(varTypeIsSIMD(type));
assert(getSIMDTypeForSize(simdSize) == type);
assert(op1 != nullptr);
assert(op1->TypeIs(type));
assert(op2 != nullptr);
assert(op2->TypeIs(type));
var_types simdBaseType = JitType2PreciseVarType(simdBaseJitType);
assert(varTypeIsArithmetic(simdBaseType) && !varTypeIsLong(simdBaseType));
GenTree* tmp1;
GenTree* tmp2;
#if defined(TARGET_XARCH)
GenTree* tmp3;
GenTree* tmp4;
if (simdSize == 32)
{
assert(compIsaSupportedDebugOnly(InstructionSet_AVX));
switch (simdBaseType)
{
case TYP_BYTE:
case TYP_UBYTE:
{
assert(compIsaSupportedDebugOnly(InstructionSet_AVX2));
// This is the same in principle as the other comments below; however, due to
// code formatting, it's too long to reasonably display here.
CorInfoType opBaseJitType = (simdBaseType == TYP_BYTE) ? CORINFO_TYPE_SHORT : CORINFO_TYPE_USHORT;
CORINFO_CLASS_HANDLE clsHnd = gtGetStructHandleForSIMD(type, opBaseJitType);
tmp1 = gtNewSimdHWIntrinsicNode(type, gtNewIconNode(0x00FF), NI_Vector256_Create, opBaseJitType,
simdSize, isSimdAsHWIntrinsic);
GenTree* tmp1Dup;
tmp1 = impCloneExpr(tmp1, &tmp1Dup, clsHnd, (unsigned)CHECK_SPILL_ALL,
nullptr DEBUGARG("Clone tmp1 for vector narrow"));
tmp2 = gtNewSimdHWIntrinsicNode(type, op1, tmp1, NI_SSE2_And, simdBaseJitType, simdSize,
isSimdAsHWIntrinsic);
tmp3 = gtNewSimdHWIntrinsicNode(type, op2, tmp1Dup, NI_SSE2_And, simdBaseJitType, simdSize,
isSimdAsHWIntrinsic);
tmp4 = gtNewSimdHWIntrinsicNode(type, tmp2, tmp3, NI_SSE2_PackUnsignedSaturate, CORINFO_TYPE_UBYTE,
simdSize, isSimdAsHWIntrinsic);
CorInfoType permuteBaseJitType = (simdBaseType == TYP_BYTE) ? CORINFO_TYPE_LONG : CORINFO_TYPE_ULONG;
return gtNewSimdHWIntrinsicNode(type, tmp4, gtNewIconNode(SHUFFLE_WYZX), NI_AVX2_Permute4x64,
permuteBaseJitType, simdSize, isSimdAsHWIntrinsic);
}
case TYP_SHORT:
case TYP_USHORT:
{
assert(compIsaSupportedDebugOnly(InstructionSet_AVX2));
// op1 = Elements 0L, 0U, 1L, 1U, 2L, 2U, 3L, 3U | 4L, 4U, 5L, 5U, 6L, 6U, 7L, 7U
// op2 = Elements 8L, 8U, 9L, 9U, AL, AU, BL, BU | CL, CU, DL, DU, EL, EU, FL, FU
//
// tmp2 = Elements 0L, --, 1L, --, 2L, --, 3L, -- | 4L, --, 5L, --, 6L, --, 7L, --
// tmp3 = Elements 8L, --, 9L, --, AL, --, BL, -- | CL, --, DL, --, EL, --, FL, --
// tmp4 = Elements 0L, 1L, 2L, 3L, 8L, 9L, AL, BL | 4L, 5L, 6L, 7L, CL, DL, EL, FL
// return Elements 0L, 1L, 2L, 3L, 4L, 5L, 6L, 7L | 8L, 9L, AL, BL, CL, DL, EL, FL
//
// var tmp1 = Vector256.Create(0x0000FFFF).AsInt16();
// var tmp2 = Avx2.And(op1.AsInt16(), tmp1);
// var tmp3 = Avx2.And(op2.AsInt16(), tmp1);
// var tmp4 = Avx2.PackUnsignedSaturate(tmp2, tmp3);
// return Avx2.Permute4x64(tmp4.AsUInt64(), SHUFFLE_WYZX).As<T>();
CorInfoType opBaseJitType = (simdBaseType == TYP_SHORT) ? CORINFO_TYPE_INT : CORINFO_TYPE_UINT;
CORINFO_CLASS_HANDLE clsHnd = gtGetStructHandleForSIMD(type, opBaseJitType);
tmp1 = gtNewSimdHWIntrinsicNode(type, gtNewIconNode(0x0000FFFF), NI_Vector256_Create, opBaseJitType,
simdSize, isSimdAsHWIntrinsic);
GenTree* tmp1Dup;
tmp1 = impCloneExpr(tmp1, &tmp1Dup, clsHnd, (unsigned)CHECK_SPILL_ALL,
nullptr DEBUGARG("Clone tmp1 for vector narrow"));
tmp2 = gtNewSimdHWIntrinsicNode(type, op1, tmp1, NI_SSE2_And, simdBaseJitType, simdSize,
isSimdAsHWIntrinsic);
tmp3 = gtNewSimdHWIntrinsicNode(type, op2, tmp1Dup, NI_SSE2_And, simdBaseJitType, simdSize,
isSimdAsHWIntrinsic);
tmp4 = gtNewSimdHWIntrinsicNode(type, tmp2, tmp3, NI_SSE41_PackUnsignedSaturate, CORINFO_TYPE_USHORT,
simdSize, isSimdAsHWIntrinsic);
CorInfoType permuteBaseJitType = (simdBaseType == TYP_BYTE) ? CORINFO_TYPE_LONG : CORINFO_TYPE_ULONG;
return gtNewSimdHWIntrinsicNode(type, tmp4, gtNewIconNode(SHUFFLE_WYZX), NI_AVX2_Permute4x64,
permuteBaseJitType, simdSize, isSimdAsHWIntrinsic);
}
case TYP_INT:
case TYP_UINT:
{
assert(compIsaSupportedDebugOnly(InstructionSet_AVX2));
// op1 = Elements 0, 1 | 2, 3; 0L, 0U, 1L, 1U | 2L, 2U, 3L, 3U
// op2 = Elements 4, 5 | 6, 7; 4L, 4U, 5L, 5U | 6L, 6U, 7L, 7U
//
// tmp1 = Elements 0L, 4L, 0U, 4U | 2L, 6L, 2U, 6U
// tmp2 = Elements 1L, 5L, 1U, 5U | 3L, 7L, 3U, 7U
// tmp3 = Elements 0L, 1L, 4L, 5L | 2L, 3L, 6L, 7L
// return Elements 0L, 1L, 2L, 3L | 4L, 5L, 6L, 7L
//
// var tmp1 = Avx2.UnpackLow(op1, op2);
// var tmp2 = Avx2.UnpackHigh(op1, op2);
// var tmp3 = Avx2.UnpackLow(tmp1, tmp2);
// return Avx2.Permute4x64(tmp3.AsUInt64(), SHUFFLE_WYZX).AsUInt32();
CorInfoType opBaseJitType = (simdBaseType == TYP_INT) ? CORINFO_TYPE_LONG : CORINFO_TYPE_ULONG;
CORINFO_CLASS_HANDLE clsHnd = gtGetStructHandleForSIMD(type, opBaseJitType);
GenTree* op1Dup;
op1 = impCloneExpr(op1, &op1Dup, clsHnd, (unsigned)CHECK_SPILL_ALL,
nullptr DEBUGARG("Clone op1 for vector narrow"));
GenTree* op2Dup;
op2 = impCloneExpr(op2, &op2Dup, clsHnd, (unsigned)CHECK_SPILL_ALL,
nullptr DEBUGARG("Clone op2 for vector narrow"));
tmp1 = gtNewSimdHWIntrinsicNode(type, op1, op2, NI_AVX2_UnpackLow, simdBaseJitType, simdSize,
isSimdAsHWIntrinsic);
tmp2 = gtNewSimdHWIntrinsicNode(type, op1Dup, op2Dup, NI_AVX2_UnpackHigh, simdBaseJitType, simdSize,
isSimdAsHWIntrinsic);
tmp3 = gtNewSimdHWIntrinsicNode(type, tmp1, tmp2, NI_AVX2_UnpackLow, simdBaseJitType, simdSize,
isSimdAsHWIntrinsic);
return gtNewSimdHWIntrinsicNode(type, tmp3, gtNewIconNode(SHUFFLE_WYZX), NI_AVX2_Permute4x64,
opBaseJitType, simdSize, isSimdAsHWIntrinsic);
}
case TYP_FLOAT:
{
// op1 = Elements 0, 1 | 2, 3
// op2 = Elements 4, 5 | 6, 7
//
// tmp1 = Elements 0, 1, 2, 3 | -, -, -, -
// tmp2 = Elements 4, 5, 6, 7
// return Elements 0, 1, 2, 3 | 4, 5, 6, 7
//
// var tmp1 = Avx.ConvertToVector128Single(op1).ToVector256Unsafe();
// var tmp2 = Avx.ConvertToVector128Single(op2);
// return Avx.InsertVector128(tmp1, tmp2, 1);
tmp1 = gtNewSimdHWIntrinsicNode(TYP_SIMD16, op1, NI_AVX_ConvertToVector128Single, simdBaseJitType,
simdSize, isSimdAsHWIntrinsic);
tmp2 = gtNewSimdHWIntrinsicNode(TYP_SIMD16, op2, NI_AVX_ConvertToVector128Single, simdBaseJitType,
simdSize, isSimdAsHWIntrinsic);
tmp1 = gtNewSimdHWIntrinsicNode(type, tmp1, NI_Vector128_ToVector256Unsafe, simdBaseJitType, 16,
isSimdAsHWIntrinsic);
return gtNewSimdHWIntrinsicNode(type, tmp1, tmp2, gtNewIconNode(1), NI_AVX_InsertVector128,
simdBaseJitType, simdSize, isSimdAsHWIntrinsic);
}
default:
{
unreached();
}
}
}
else
{
switch (simdBaseType)
{
case TYP_BYTE:
case TYP_UBYTE:
{
// op1 = Elements 0, 1, 2, 3, 4, 5, 6, 7; 0L, 0U, 1L, 1U, 2L, 2U, 3L, 3U, 4L, 4U, 5L, 5U, 6L, 6U, 7L, 7U
// op2 = Elements 8, 9, A, B, C, D, E, F; 8L, 8U, 9L, 9U, AL, AU, BL, BU, CL, CU, DL, DU, EL, EU, FL, FU
//
// tmp2 = Elements 0L, --, 1L, --, 2L, --, 3L, --, 4L, --, 5L, --, 6L, --, 7L, --
// tmp3 = Elements 8L, --, 9L, --, AL, --, BL, --, CL, --, DL, --, EL, --, FL, --
// return Elements 0L, 1L, 2L, 3L, 4L, 5L, 6L, 7L, 8L, 9L, AL, BL, CL, DL, EL, FL
//
// var tmp1 = Vector128.Create((ushort)(0x00FF)).AsSByte();
// var tmp2 = Sse2.And(op1.AsSByte(), tmp1);
// var tmp3 = Sse2.And(op2.AsSByte(), tmp1);
// return Sse2.PackUnsignedSaturate(tmp1, tmp2).As<T>();
CorInfoType opBaseJitType = (simdBaseType == TYP_BYTE) ? CORINFO_TYPE_SHORT : CORINFO_TYPE_USHORT;
CORINFO_CLASS_HANDLE clsHnd = gtGetStructHandleForSIMD(type, opBaseJitType);
tmp1 = gtNewSimdHWIntrinsicNode(type, gtNewIconNode(0x00FF), NI_Vector128_Create, opBaseJitType,
simdSize, isSimdAsHWIntrinsic);
GenTree* tmp1Dup;
tmp1 = impCloneExpr(tmp1, &tmp1Dup, clsHnd, (unsigned)CHECK_SPILL_ALL,
nullptr DEBUGARG("Clone tmp1 for vector narrow"));
tmp2 = gtNewSimdHWIntrinsicNode(type, op1, tmp1, NI_SSE2_And, simdBaseJitType, simdSize,
isSimdAsHWIntrinsic);
tmp3 = gtNewSimdHWIntrinsicNode(type, op2, tmp1Dup, NI_SSE2_And, simdBaseJitType, simdSize,
isSimdAsHWIntrinsic);
return gtNewSimdHWIntrinsicNode(type, tmp2, tmp3, NI_SSE2_PackUnsignedSaturate, CORINFO_TYPE_UBYTE,
simdSize, isSimdAsHWIntrinsic);
}
case TYP_SHORT:
case TYP_USHORT:
{
// op1 = Elements 0, 1, 2, 3; 0L, 0U, 1L, 1U, 2L, 2U, 3L, 3U
// op2 = Elements 4, 5, 6, 7; 4L, 4U, 5L, 5U, 6L, 6U, 7L, 7U
//
// ...
CorInfoType opBaseJitType = (simdBaseType == TYP_SHORT) ? CORINFO_TYPE_INT : CORINFO_TYPE_UINT;
CORINFO_CLASS_HANDLE clsHnd = gtGetStructHandleForSIMD(type, opBaseJitType);
if (compOpportunisticallyDependsOn(InstructionSet_SSE41))
{
// ...
//
// tmp2 = Elements 0L, --, 1L, --, 2L, --, 3L, --
// tmp3 = Elements 4L, --, 5L, --, 6L, --, 7L, --
// return Elements 0L, 1L, 2L, 3L, 4L, 5L, 6L, 7L
//
// var tmp1 = Vector128.Create(0x0000FFFF).AsInt16();
// var tmp2 = Sse2.And(op1.AsInt16(), tmp1);
// var tmp3 = Sse2.And(op2.AsInt16(), tmp1);
// return Sse2.PackUnsignedSaturate(tmp2, tmp3).As<T>();
tmp1 = gtNewSimdHWIntrinsicNode(type, gtNewIconNode(0x0000FFFF), NI_Vector128_Create, opBaseJitType,
simdSize, isSimdAsHWIntrinsic);
GenTree* tmp1Dup;
tmp1 = impCloneExpr(tmp1, &tmp1Dup, clsHnd, (unsigned)CHECK_SPILL_ALL,
nullptr DEBUGARG("Clone tmp1 for vector narrow"));
tmp2 = gtNewSimdHWIntrinsicNode(type, op1, tmp1, NI_SSE2_And, simdBaseJitType, simdSize,
isSimdAsHWIntrinsic);
tmp3 = gtNewSimdHWIntrinsicNode(type, op2, tmp1Dup, NI_SSE2_And, simdBaseJitType, simdSize,
isSimdAsHWIntrinsic);
return gtNewSimdHWIntrinsicNode(type, tmp2, tmp3, NI_SSE41_PackUnsignedSaturate,
CORINFO_TYPE_USHORT, simdSize, isSimdAsHWIntrinsic);
}
else
{
// ...
//
// tmp1 = Elements 0L, 4L, 0U, 4U, 1L, 5L, 1U, 5U
// tmp2 = Elements 2L, 6L, 2U, 6U, 3L, 7L, 3U, 7U
// tmp3 = Elements 0L, 2L, 4L, 6L, 0U, 2U, 4U, 6U
// tmp4 = Elements 1L, 3L, 5L, 7L, 1U, 3U, 5U, 7U
// return Elements 0L, 1L, 2L, 3L, 4L, 5L, 6L, 7L
//
// var tmp1 = Sse2.UnpackLow(op1.AsUInt16(), op2.AsUInt16());
// var tmp2 = Sse2.UnpackHigh(op1.AsUInt16(), op2.AsUInt16());
// var tmp3 = Sse2.UnpackLow(tmp1, tmp2);
// var tmp4 = Sse2.UnpackHigh(tmp1, tmp2);
// return Sse2.UnpackLow(tmp3, tmp4).As<T>();
GenTree* op1Dup;
op1 = impCloneExpr(op1, &op1Dup, clsHnd, (unsigned)CHECK_SPILL_ALL,
nullptr DEBUGARG("Clone op1 for vector narrow"));
GenTree* op2Dup;
op2 = impCloneExpr(op2, &op2Dup, clsHnd, (unsigned)CHECK_SPILL_ALL,
nullptr DEBUGARG("Clone op1 for vector narrow"));
tmp1 = gtNewSimdHWIntrinsicNode(type, op1, op2, NI_SSE2_UnpackLow, simdBaseJitType, simdSize,
isSimdAsHWIntrinsic);
tmp2 = gtNewSimdHWIntrinsicNode(type, op1Dup, op2Dup, NI_SSE2_UnpackHigh, simdBaseJitType, simdSize,
isSimdAsHWIntrinsic);
clsHnd = gtGetStructHandleForSIMD(type, simdBaseJitType);
GenTree* tmp1Dup;
tmp1 = impCloneExpr(tmp1, &tmp1Dup, clsHnd, (unsigned)CHECK_SPILL_ALL,
nullptr DEBUGARG("Clone tmp1 for vector narrow"));
GenTree* tmp2Dup;
tmp2 = impCloneExpr(tmp2, &tmp2Dup, clsHnd, (unsigned)CHECK_SPILL_ALL,
nullptr DEBUGARG("Clone tmp2 for vector narrow"));
tmp3 = gtNewSimdHWIntrinsicNode(type, tmp1, tmp2, NI_SSE2_UnpackLow, simdBaseJitType, simdSize,
isSimdAsHWIntrinsic);
tmp4 = gtNewSimdHWIntrinsicNode(type, tmp1Dup, tmp2Dup, NI_SSE2_UnpackHigh, simdBaseJitType,
simdSize, isSimdAsHWIntrinsic);
return gtNewSimdHWIntrinsicNode(type, tmp3, tmp4, NI_SSE2_UnpackLow, simdBaseJitType, simdSize,
isSimdAsHWIntrinsic);
}
}
case TYP_INT:
case TYP_UINT:
{
// op1 = Elements 0, 1; 0L, 0U, 1L, 1U
// op2 = Elements 2, 3; 2L, 2U, 3L, 3U
//
// tmp1 = Elements 0L, 2L, 0U, 2U
// tmp2 = Elements 1L, 3L, 1U, 3U
// return Elements 0L, 1L, 2L, 3L
//
// var tmp1 = Sse2.UnpackLow(op1.AsUInt32(), op2.AsUInt32());
// var tmp2 = Sse2.UnpackHigh(op1.AsUInt32(), op2.AsUInt32());
// return Sse2.UnpackLow(tmp1, tmp2).As<T>();
CorInfoType opBaseJitType = (simdBaseType == TYP_INT) ? CORINFO_TYPE_LONG : CORINFO_TYPE_ULONG;
CORINFO_CLASS_HANDLE clsHnd = gtGetStructHandleForSIMD(type, opBaseJitType);
GenTree* op1Dup;
op1 = impCloneExpr(op1, &op1Dup, clsHnd, (unsigned)CHECK_SPILL_ALL,
nullptr DEBUGARG("Clone op1 for vector narrow"));
GenTree* op2Dup;
op2 = impCloneExpr(op2, &op2Dup, clsHnd, (unsigned)CHECK_SPILL_ALL,
nullptr DEBUGARG("Clone op2 for vector narrow"));
tmp1 = gtNewSimdHWIntrinsicNode(type, op1, op2, NI_SSE2_UnpackLow, simdBaseJitType, simdSize,
isSimdAsHWIntrinsic);
tmp2 = gtNewSimdHWIntrinsicNode(type, op1Dup, op2Dup, NI_SSE2_UnpackHigh, simdBaseJitType, simdSize,
isSimdAsHWIntrinsic);
return gtNewSimdHWIntrinsicNode(type, tmp1, tmp2, NI_SSE2_UnpackLow, simdBaseJitType, simdSize,
isSimdAsHWIntrinsic);
}
case TYP_FLOAT:
{
// op1 = Elements 0, 1
// op2 = Elements 2, 3
//
// tmp1 = Elements 0, 1, -, -
// tmp2 = Elements 2, 3, -, -
// return Elements 0, 1, 2, 3
//
// var tmp1 = Sse2.ConvertToVector128Single(op1);
// var tmp2 = Sse2.ConvertToVector128Single(op2);
// return Sse.MoveLowToHigh(tmp1, tmp2);
CorInfoType opBaseJitType = CORINFO_TYPE_DOUBLE;
tmp1 = gtNewSimdHWIntrinsicNode(type, op1, NI_SSE2_ConvertToVector128Single, opBaseJitType, simdSize,
isSimdAsHWIntrinsic);
tmp2 = gtNewSimdHWIntrinsicNode(type, op2, NI_SSE2_ConvertToVector128Single, opBaseJitType, simdSize,
isSimdAsHWIntrinsic);
return gtNewSimdHWIntrinsicNode(type, tmp1, tmp2, NI_SSE_MoveLowToHigh, simdBaseJitType, simdSize,
isSimdAsHWIntrinsic);
}
default:
{
unreached();
}
}
}
#elif defined(TARGET_ARM64)
if (simdSize == 16)
{
if (varTypeIsFloating(simdBaseType))
{
// var tmp1 = AdvSimd.Arm64.ConvertToSingleLower(op1);
// return AdvSimd.Arm64.ConvertToSingleUpper(tmp1, op2);
tmp1 = gtNewSimdHWIntrinsicNode(TYP_SIMD8, op1, NI_AdvSimd_Arm64_ConvertToSingleLower, simdBaseJitType, 8,
isSimdAsHWIntrinsic);
return gtNewSimdHWIntrinsicNode(type, tmp1, op2, NI_AdvSimd_Arm64_ConvertToSingleUpper, simdBaseJitType,
simdSize, isSimdAsHWIntrinsic);
}
else
{
// var tmp1 = AdvSimd.ExtractNarrowingLower(op1);
// return AdvSimd.ExtractNarrowingUpper(tmp1, op2);
tmp1 = gtNewSimdHWIntrinsicNode(TYP_SIMD8, op1, NI_AdvSimd_ExtractNarrowingLower, simdBaseJitType, 8,
isSimdAsHWIntrinsic);
return gtNewSimdHWIntrinsicNode(type, tmp1, op2, NI_AdvSimd_ExtractNarrowingUpper, simdBaseJitType,
simdSize, isSimdAsHWIntrinsic);
}
}
else if (varTypeIsFloating(simdBaseType))
{
// var tmp1 = op1.ToVector128Unsafe();
// return AdvSimd.Arm64.ConvertToSingleLower(tmp1);
CorInfoType tmp2BaseJitType = CORINFO_TYPE_DOUBLE;
tmp1 = gtNewSimdHWIntrinsicNode(TYP_SIMD16, op1, NI_Vector64_ToVector128Unsafe, simdBaseJitType, simdSize,
isSimdAsHWIntrinsic);
tmp2 = gtNewSimdHWIntrinsicNode(TYP_SIMD16, tmp1, gtNewIconNode(1), op2, NI_AdvSimd_InsertScalar,
tmp2BaseJitType, 16, isSimdAsHWIntrinsic);
return gtNewSimdHWIntrinsicNode(type, tmp2, NI_AdvSimd_Arm64_ConvertToSingleLower, simdBaseJitType, simdSize,
isSimdAsHWIntrinsic);
}
else
{
// var tmp1 = op1.ToVector128Unsafe();
// var tmp2 = AdvSimd.InsertScalar(tmp1.AsUInt64(), 1, op2.AsUInt64());
// return AdvSimd.ExtractNarrowingUpper(tmp2).As<T>();
CorInfoType tmp2BaseJitType = varTypeIsSigned(simdBaseType) ? CORINFO_TYPE_LONG : CORINFO_TYPE_ULONG;
tmp1 = gtNewSimdHWIntrinsicNode(TYP_SIMD16, op1, NI_Vector64_ToVector128Unsafe, simdBaseJitType, simdSize,
isSimdAsHWIntrinsic);
tmp2 = gtNewSimdHWIntrinsicNode(TYP_SIMD16, tmp1, gtNewIconNode(1), op2, NI_AdvSimd_InsertScalar,
tmp2BaseJitType, 16, isSimdAsHWIntrinsic);
return gtNewSimdHWIntrinsicNode(type, tmp2, NI_AdvSimd_ExtractNarrowingLower, simdBaseJitType, simdSize,
isSimdAsHWIntrinsic);
}
#else
#error Unsupported platform
#endif // !TARGET_XARCH && !TARGET_ARM64
}
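//------------------------------------------------------------------------
// gtNewSimdSqrtNode: Creates a new SIMD node computing the element-wise square root of op1.
//    Only floating-point base types are supported.
//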
GenTree* Compiler::gtNewSimdSqrtNode(
var_types type, GenTree* op1, CorInfoType simdBaseJitType, unsigned simdSize, bool isSimdAsHWIntrinsic)
{
assert(IsBaselineSimdIsaSupportedDebugOnly());
assert(varTypeIsSIMD(type));
assert(getSIMDTypeForSize(simdSize) == type);
assert(op1 != nullptr);
assert(op1->TypeIs(type));
var_types simdBaseType = JitType2PreciseVarType(simdBaseJitType);
assert(varTypeIsFloating(simdBaseType));
NamedIntrinsic intrinsic = NI_Illegal;
#if defined(TARGET_XARCH)
if (simdSize == 32)
{
assert(compIsaSupportedDebugOnly(InstructionSet_AVX));
intrinsic = NI_AVX_Sqrt;
}
else if (simdBaseType == TYP_FLOAT)
{
intrinsic = NI_SSE_Sqrt;
}
else
{
intrinsic = NI_SSE2_Sqrt;
}
#elif defined(TARGET_ARM64)
if ((simdSize == 8) && (simdBaseType == TYP_DOUBLE))
{
intrinsic = NI_AdvSimd_SqrtScalar;
}
else
{
intrinsic = NI_AdvSimd_Arm64_Sqrt;
}
#else
#error Unsupported platform
#endif // !TARGET_XARCH && !TARGET_ARM64
assert(intrinsic != NI_Illegal);
return gtNewSimdHWIntrinsicNode(type, op1, intrinsic, simdBaseJitType, simdSize, isSimdAsHWIntrinsic);
}
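//------------------------------------------------------------------------
// gtNewSimdSumNode: Creates a new node that sums all elements of the vector op1 and
//    returns the result as a scalar of the given type.
//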
GenTree* Compiler::gtNewSimdSumNode(
var_types type, GenTree* op1, CorInfoType simdBaseJitType, unsigned simdSize, bool isSimdAsHWIntrinsic)
{
assert(IsBaselineSimdIsaSupportedDebugOnly());
var_types simdType = getSIMDTypeForSize(simdSize);
assert(varTypeIsSIMD(simdType));
assert(op1 != nullptr);
assert(op1->TypeIs(simdType));
var_types simdBaseType = JitType2PreciseVarType(simdBaseJitType);
assert(varTypeIsArithmetic(simdBaseType));
NamedIntrinsic intrinsic = NI_Illegal;
GenTree* tmp = nullptr;
CORINFO_CLASS_HANDLE clsHnd = gtGetStructHandleForSIMD(simdType, simdBaseJitType);
#if defined(TARGET_XARCH)
assert(!varTypeIsByte(simdBaseType) && !varTypeIsLong(simdBaseType));
// HorizontalAdd combines pairs so we need log2(vectorLength) passes to sum all elements together.
unsigned vectorLength = getSIMDVectorLength(simdSize, simdBaseType);
int haddCount = genLog2(vectorLength);
if (simdSize == 32)
{
// Minus 1 because for the last pass we split the vector into low / high halves and add them together.
haddCount -= 1;
if (varTypeIsFloating(simdBaseType))
{
assert(compIsaSupportedDebugOnly(InstructionSet_AVX));
intrinsic = NI_AVX_HorizontalAdd;
}
else
{
assert(compIsaSupportedDebugOnly(InstructionSet_AVX2));
intrinsic = NI_AVX2_HorizontalAdd;
}
}
else if (varTypeIsFloating(simdBaseType))
{
assert(compIsaSupportedDebugOnly(InstructionSet_SSE3));
intrinsic = NI_SSE3_HorizontalAdd;
}
else
{
assert(compIsaSupportedDebugOnly(InstructionSet_SSSE3));
intrinsic = NI_SSSE3_HorizontalAdd;
}
for (int i = 0; i < haddCount; i++)
{
op1 = impCloneExpr(op1, &tmp, clsHnd, (unsigned)CHECK_SPILL_ALL, nullptr DEBUGARG("Clone op1 for vector sum"));
op1 = gtNewSimdHWIntrinsicNode(simdType, op1, tmp, intrinsic, simdBaseJitType, simdSize, isSimdAsHWIntrinsic);
}
if (simdSize == 32)
{
intrinsic = (simdBaseType == TYP_FLOAT) ? NI_SSE_Add : NI_SSE2_Add;
op1 = impCloneExpr(op1, &tmp, clsHnd, (unsigned)CHECK_SPILL_ALL, nullptr DEBUGARG("Clone op1 for vector sum"));
op1 = gtNewSimdHWIntrinsicNode(TYP_SIMD16, op1, gtNewIconNode(0x01, TYP_INT), NI_AVX_ExtractVector128,
simdBaseJitType, simdSize, isSimdAsHWIntrinsic);
tmp = gtNewSimdHWIntrinsicNode(simdType, tmp, NI_Vector256_GetLower, simdBaseJitType, simdSize,
isSimdAsHWIntrinsic);
op1 = gtNewSimdHWIntrinsicNode(TYP_SIMD16, op1, tmp, intrinsic, simdBaseJitType, 16, isSimdAsHWIntrinsic);
}
return gtNewSimdHWIntrinsicNode(type, op1, NI_Vector128_ToScalar, simdBaseJitType, simdSize, isSimdAsHWIntrinsic);
#elif defined(TARGET_ARM64)
switch (simdBaseType)
{
case TYP_BYTE:
case TYP_UBYTE:
case TYP_SHORT:
case TYP_USHORT:
{
tmp = gtNewSimdHWIntrinsicNode(simdType, op1, NI_AdvSimd_Arm64_AddAcross, simdBaseJitType, simdSize,
isSimdAsHWIntrinsic);
return gtNewSimdHWIntrinsicNode(type, tmp, NI_Vector64_ToScalar, simdBaseJitType, 8, isSimdAsHWIntrinsic);
}
case TYP_INT:
case TYP_UINT:
{
if (simdSize == 8)
{
op1 = impCloneExpr(op1, &tmp, clsHnd, (unsigned)CHECK_SPILL_ALL,
nullptr DEBUGARG("Clone op1 for vector sum"));
tmp = gtNewSimdHWIntrinsicNode(simdType, op1, tmp, NI_AdvSimd_AddPairwise, simdBaseJitType, simdSize,
isSimdAsHWIntrinsic);
}
else
{
tmp = gtNewSimdHWIntrinsicNode(TYP_SIMD16, op1, NI_AdvSimd_Arm64_AddAcross, simdBaseJitType, 16,
isSimdAsHWIntrinsic);
}
return gtNewSimdHWIntrinsicNode(type, tmp, NI_Vector64_ToScalar, simdBaseJitType, 8, isSimdAsHWIntrinsic);
}
case TYP_FLOAT:
{
if (simdSize == 8)
{
op1 = gtNewSimdHWIntrinsicNode(TYP_SIMD8, op1, NI_AdvSimd_Arm64_AddPairwiseScalar, simdBaseJitType,
simdSize, isSimdAsHWIntrinsic);
}
else
{
unsigned vectorLength = getSIMDVectorLength(simdSize, simdBaseType);
int haddCount = genLog2(vectorLength);
for (int i = 0; i < haddCount; i++)
{
op1 = impCloneExpr(op1, &tmp, clsHnd, (unsigned)CHECK_SPILL_ALL,
nullptr DEBUGARG("Clone op1 for vector sum"));
op1 = gtNewSimdHWIntrinsicNode(simdType, op1, tmp, NI_AdvSimd_Arm64_AddPairwise, simdBaseJitType,
simdSize, isSimdAsHWIntrinsic);
}
}
return gtNewSimdHWIntrinsicNode(type, op1, NI_Vector128_ToScalar, simdBaseJitType, simdSize,
isSimdAsHWIntrinsic);
}
case TYP_DOUBLE:
case TYP_LONG:
case TYP_ULONG:
{
if (simdSize == 16)
{
op1 = gtNewSimdHWIntrinsicNode(TYP_SIMD8, op1, NI_AdvSimd_Arm64_AddPairwiseScalar, simdBaseJitType,
simdSize, isSimdAsHWIntrinsic);
}
return gtNewSimdHWIntrinsicNode(type, op1, NI_Vector64_ToScalar, simdBaseJitType, 8, isSimdAsHWIntrinsic);
}
default:
{
unreached();
}
}
#else
#error Unsupported platform
#endif // !TARGET_XARCH && !TARGET_ARM64
}
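//------------------------------------------------------------------------
// gtNewSimdUnOpNode: Creates a new SIMD unary node for the given operation (GT_NEG or GT_NOT).
//    Negation is expanded as (Zero - op1) where no direct negate instruction exists, and
//    NOT is expanded as (op1 ^ AllBitsSet) on xarch.
//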
GenTree* Compiler::gtNewSimdUnOpNode(genTreeOps op,
var_types type,
GenTree* op1,
CorInfoType simdBaseJitType,
unsigned simdSize,
bool isSimdAsHWIntrinsic)
{
assert(IsBaselineSimdIsaSupportedDebugOnly());
assert(varTypeIsSIMD(type));
assert(getSIMDTypeForSize(simdSize) == type);
assert(op1 != nullptr);
assert(op1->TypeIs(type));
var_types simdBaseType = JitType2PreciseVarType(simdBaseJitType);
assert(varTypeIsArithmetic(simdBaseType));
NamedIntrinsic intrinsic = NI_Illegal;
GenTree* op2 = nullptr;
switch (op)
{
#if defined(TARGET_XARCH)
case GT_NEG:
{
if (simdSize == 32)
{
assert(compIsaSupportedDebugOnly(InstructionSet_AVX));
assert(varTypeIsFloating(simdBaseType) || compIsaSupportedDebugOnly(InstructionSet_AVX2));
}
op2 = gtNewSimdZeroNode(type, simdBaseJitType, simdSize, isSimdAsHWIntrinsic);
// Zero - op1
return gtNewSimdBinOpNode(GT_SUB, type, op2, op1, simdBaseJitType, simdSize, isSimdAsHWIntrinsic);
}
case GT_NOT:
{
assert((simdSize != 32) || compIsaSupportedDebugOnly(InstructionSet_AVX));
intrinsic = (simdSize == 32) ? NI_Vector256_get_AllBitsSet : NI_Vector128_get_AllBitsSet;
op2 = gtNewSimdHWIntrinsicNode(type, intrinsic, simdBaseJitType, simdSize, isSimdAsHWIntrinsic);
// op1 ^ AllBitsSet
return gtNewSimdBinOpNode(GT_XOR, type, op1, op2, simdBaseJitType, simdSize, isSimdAsHWIntrinsic);
}
#elif defined(TARGET_ARM64)
case GT_NEG:
{
if (varTypeIsSigned(simdBaseType))
{
if (simdBaseType == TYP_LONG)
{
intrinsic = (simdSize == 8) ? NI_AdvSimd_Arm64_NegateScalar : NI_AdvSimd_Arm64_Negate;
}
else if (simdBaseType == TYP_DOUBLE)
{
intrinsic = (simdSize == 8) ? NI_AdvSimd_NegateScalar : NI_AdvSimd_Arm64_Negate;
}
else
{
intrinsic = NI_AdvSimd_Negate;
}
return gtNewSimdHWIntrinsicNode(type, op1, intrinsic, simdBaseJitType, simdSize, isSimdAsHWIntrinsic);
}
else
{
// Zero - op1
op2 = gtNewSimdZeroNode(type, simdBaseJitType, simdSize, isSimdAsHWIntrinsic);
return gtNewSimdBinOpNode(GT_SUB, type, op2, op1, simdBaseJitType, simdSize, isSimdAsHWIntrinsic);
}
}
case GT_NOT:
{
return gtNewSimdHWIntrinsicNode(type, op1, NI_AdvSimd_Not, simdBaseJitType, simdSize, isSimdAsHWIntrinsic);
}
#else
#error Unsupported platform
#endif // !TARGET_XARCH && !TARGET_ARM64
default:
{
unreached();
}
}
}
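//------------------------------------------------------------------------
// gtNewSimdWidenLowerNode: Creates a new SIMD node that widens the lower half of op1's
//    elements to the next larger element type, producing a full vector of widened values.
//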
GenTree* Compiler::gtNewSimdWidenLowerNode(
var_types type, GenTree* op1, CorInfoType simdBaseJitType, unsigned simdSize, bool isSimdAsHWIntrinsic)
{
assert(IsBaselineSimdIsaSupportedDebugOnly());
assert(varTypeIsSIMD(type));
assert(getSIMDTypeForSize(simdSize) == type);
assert(op1 != nullptr);
assert(op1->TypeIs(type));
var_types simdBaseType = JitType2PreciseVarType(simdBaseJitType);
assert(varTypeIsArithmetic(simdBaseType) && !varTypeIsLong(simdBaseType));
NamedIntrinsic intrinsic = NI_Illegal;
GenTree* tmp1;
#if defined(TARGET_XARCH)
if (simdSize == 32)
{
assert(compIsaSupportedDebugOnly(InstructionSet_AVX));
assert(!varTypeIsIntegral(simdBaseType) || compIsaSupportedDebugOnly(InstructionSet_AVX2));
tmp1 =
gtNewSimdHWIntrinsicNode(type, op1, NI_Vector256_GetLower, simdBaseJitType, simdSize, isSimdAsHWIntrinsic);
switch (simdBaseType)
{
case TYP_BYTE:
case TYP_UBYTE:
{
intrinsic = NI_AVX2_ConvertToVector256Int16;
break;
}
case TYP_SHORT:
case TYP_USHORT:
{
intrinsic = NI_AVX2_ConvertToVector256Int32;
break;
}
case TYP_INT:
case TYP_UINT:
{
intrinsic = NI_AVX2_ConvertToVector256Int64;
break;
}
case TYP_FLOAT:
{
intrinsic = NI_AVX_ConvertToVector256Double;
break;
}
default:
{
unreached();
}
}
assert(intrinsic != NI_Illegal);
return gtNewSimdHWIntrinsicNode(type, tmp1, intrinsic, simdBaseJitType, simdSize, isSimdAsHWIntrinsic);
}
else if ((simdBaseType == TYP_FLOAT) || compOpportunisticallyDependsOn(InstructionSet_SSE41))
{
switch (simdBaseType)
{
case TYP_BYTE:
case TYP_UBYTE:
{
intrinsic = NI_SSE41_ConvertToVector128Int16;
break;
}
case TYP_SHORT:
case TYP_USHORT:
{
intrinsic = NI_SSE41_ConvertToVector128Int32;
break;
}
case TYP_INT:
case TYP_UINT:
{
intrinsic = NI_SSE41_ConvertToVector128Int64;
break;
}
case TYP_FLOAT:
{
intrinsic = NI_SSE2_ConvertToVector128Double;
break;
}
default:
{
unreached();
}
}
assert(intrinsic != NI_Illegal);
return gtNewSimdHWIntrinsicNode(type, op1, intrinsic, simdBaseJitType, simdSize, isSimdAsHWIntrinsic);
}
else
{
tmp1 = gtNewSimdZeroNode(type, simdBaseJitType, simdSize, isSimdAsHWIntrinsic);
if (varTypeIsSigned(simdBaseType))
{
CORINFO_CLASS_HANDLE clsHnd = gtGetStructHandleForSIMD(type, simdBaseJitType);
GenTree* op1Dup;
op1 = impCloneExpr(op1, &op1Dup, clsHnd, (unsigned)CHECK_SPILL_ALL,
nullptr DEBUGARG("Clone op1 for vector widen lower"));
tmp1 = gtNewSimdHWIntrinsicNode(type, op1, tmp1, NI_SSE2_CompareLessThan, simdBaseJitType, simdSize,
isSimdAsHWIntrinsic);
op1 = op1Dup;
}
return gtNewSimdHWIntrinsicNode(type, op1, tmp1, NI_SSE2_UnpackLow, simdBaseJitType, simdSize,
isSimdAsHWIntrinsic);
}
#elif defined(TARGET_ARM64)
if (simdSize == 16)
{
tmp1 = gtNewSimdHWIntrinsicNode(TYP_SIMD8, op1, NI_Vector128_GetLower, simdBaseJitType, simdSize,
isSimdAsHWIntrinsic);
}
else
{
assert(simdSize == 8);
tmp1 = op1;
}
if (varTypeIsFloating(simdBaseType))
{
assert(simdBaseType == TYP_FLOAT);
intrinsic = NI_AdvSimd_Arm64_ConvertToDouble;
}
else if (varTypeIsSigned(simdBaseType))
{
intrinsic = NI_AdvSimd_SignExtendWideningLower;
}
else
{
intrinsic = NI_AdvSimd_ZeroExtendWideningLower;
}
assert(intrinsic != NI_Illegal);
tmp1 = gtNewSimdHWIntrinsicNode(type, tmp1, intrinsic, simdBaseJitType, 8, isSimdAsHWIntrinsic);
if (simdSize == 8)
{
tmp1 = gtNewSimdHWIntrinsicNode(type, tmp1, NI_Vector128_GetLower, simdBaseJitType, 16, isSimdAsHWIntrinsic);
}
return tmp1;
#else
#error Unsupported platform
#endif // !TARGET_XARCH && !TARGET_ARM64
}
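//------------------------------------------------------------------------
// gtNewSimdWidenUpperNode: Creates a new SIMD node that widens the upper half of op1's
//    elements to the next larger element type, producing a full vector of widened values.
//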
GenTree* Compiler::gtNewSimdWidenUpperNode(
var_types type, GenTree* op1, CorInfoType simdBaseJitType, unsigned simdSize, bool isSimdAsHWIntrinsic)
{
assert(IsBaselineSimdIsaSupportedDebugOnly());
assert(varTypeIsSIMD(type));
assert(getSIMDTypeForSize(simdSize) == type);
assert(op1 != nullptr);
assert(op1->TypeIs(type));
var_types simdBaseType = JitType2PreciseVarType(simdBaseJitType);
assert(varTypeIsArithmetic(simdBaseType) && !varTypeIsLong(simdBaseType));
NamedIntrinsic intrinsic = NI_Illegal;
GenTree* tmp1;
#if defined(TARGET_XARCH)
if (simdSize == 32)
{
assert(compIsaSupportedDebugOnly(InstructionSet_AVX));
assert(!varTypeIsIntegral(simdBaseType) || compIsaSupportedDebugOnly(InstructionSet_AVX2));
tmp1 = gtNewSimdHWIntrinsicNode(type, op1, gtNewIconNode(1), NI_AVX_ExtractVector128, simdBaseJitType, simdSize,
isSimdAsHWIntrinsic);
switch (simdBaseType)
{
case TYP_BYTE:
case TYP_UBYTE:
{
intrinsic = NI_AVX2_ConvertToVector256Int16;
break;
}
case TYP_SHORT:
case TYP_USHORT:
{
intrinsic = NI_AVX2_ConvertToVector256Int32;
break;
}
case TYP_INT:
case TYP_UINT:
{
intrinsic = NI_AVX2_ConvertToVector256Int64;
break;
}
case TYP_FLOAT:
{
intrinsic = NI_AVX_ConvertToVector256Double;
break;
}
default:
{
unreached();
}
}
assert(intrinsic != NI_Illegal);
return gtNewSimdHWIntrinsicNode(type, tmp1, intrinsic, simdBaseJitType, simdSize, isSimdAsHWIntrinsic);
}
else if (varTypeIsFloating(simdBaseType))
{
assert(simdBaseType == TYP_FLOAT);
CORINFO_CLASS_HANDLE clsHnd = gtGetStructHandleForSIMD(type, simdBaseJitType);
GenTree* op1Dup;
op1 = impCloneExpr(op1, &op1Dup, clsHnd, (unsigned)CHECK_SPILL_ALL,
nullptr DEBUGARG("Clone op1 for vector widen upper"));
tmp1 = gtNewSimdHWIntrinsicNode(type, op1, op1Dup, NI_SSE_MoveHighToLow, simdBaseJitType, simdSize,
isSimdAsHWIntrinsic);
return gtNewSimdHWIntrinsicNode(type, tmp1, NI_SSE2_ConvertToVector128Double, simdBaseJitType, simdSize,
isSimdAsHWIntrinsic);
}
else if (compOpportunisticallyDependsOn(InstructionSet_SSE41))
{
tmp1 = gtNewSimdHWIntrinsicNode(type, op1, gtNewIconNode(8), NI_SSE2_ShiftRightLogical128BitLane,
simdBaseJitType, simdSize, isSimdAsHWIntrinsic);
switch (simdBaseType)
{
case TYP_BYTE:
case TYP_UBYTE:
{
intrinsic = NI_SSE41_ConvertToVector128Int16;
break;
}
case TYP_SHORT:
case TYP_USHORT:
{
intrinsic = NI_SSE41_ConvertToVector128Int32;
break;
}
case TYP_INT:
case TYP_UINT:
{
intrinsic = NI_SSE41_ConvertToVector128Int64;
break;
}
default:
{
unreached();
}
}
assert(intrinsic != NI_Illegal);
return gtNewSimdHWIntrinsicNode(type, tmp1, intrinsic, simdBaseJitType, simdSize, isSimdAsHWIntrinsic);
}
else
{
tmp1 = gtNewSimdZeroNode(type, simdBaseJitType, simdSize, isSimdAsHWIntrinsic);
if (varTypeIsSigned(simdBaseType))
{
CORINFO_CLASS_HANDLE clsHnd = gtGetStructHandleForSIMD(type, simdBaseJitType);
GenTree* op1Dup;
op1 = impCloneExpr(op1, &op1Dup, clsHnd, (unsigned)CHECK_SPILL_ALL,
nullptr DEBUGARG("Clone op1 for vector widen upper"));
tmp1 = gtNewSimdHWIntrinsicNode(type, op1, tmp1, NI_SSE2_CompareLessThan, simdBaseJitType, simdSize,
isSimdAsHWIntrinsic);
op1 = op1Dup;
}
return gtNewSimdHWIntrinsicNode(type, op1, tmp1, NI_SSE2_UnpackHigh, simdBaseJitType, simdSize,
isSimdAsHWIntrinsic);
}
#elif defined(TARGET_ARM64)
GenTree* zero;
if (simdSize == 16)
{
if (varTypeIsFloating(simdBaseType))
{
assert(simdBaseType == TYP_FLOAT);
intrinsic = NI_AdvSimd_Arm64_ConvertToDoubleUpper;
}
else if (varTypeIsSigned(simdBaseType))
{
intrinsic = NI_AdvSimd_SignExtendWideningUpper;
}
else
{
intrinsic = NI_AdvSimd_ZeroExtendWideningUpper;
}
assert(intrinsic != NI_Illegal);
return gtNewSimdHWIntrinsicNode(type, op1, intrinsic, simdBaseJitType, simdSize, isSimdAsHWIntrinsic);
}
else
{
assert(simdSize == 8);
ssize_t index = 8 / genTypeSize(simdBaseType);
if (varTypeIsFloating(simdBaseType))
{
assert(simdBaseType == TYP_FLOAT);
intrinsic = NI_AdvSimd_Arm64_ConvertToDouble;
}
else if (varTypeIsSigned(simdBaseType))
{
intrinsic = NI_AdvSimd_SignExtendWideningLower;
}
else
{
intrinsic = NI_AdvSimd_ZeroExtendWideningLower;
}
assert(intrinsic != NI_Illegal);
tmp1 = gtNewSimdHWIntrinsicNode(TYP_SIMD16, op1, intrinsic, simdBaseJitType, simdSize, isSimdAsHWIntrinsic);
zero = gtNewSimdZeroNode(TYP_SIMD16, simdBaseJitType, 16, isSimdAsHWIntrinsic);
tmp1 = gtNewSimdHWIntrinsicNode(TYP_SIMD16, tmp1, zero, gtNewIconNode(index), NI_AdvSimd_ExtractVector128,
simdBaseJitType, 16, isSimdAsHWIntrinsic);
return gtNewSimdHWIntrinsicNode(type, tmp1, NI_Vector128_GetLower, simdBaseJitType, simdSize,
isSimdAsHWIntrinsic);
}
#else
#error Unsupported platform
#endif // !TARGET_XARCH && !TARGET_ARM64
}
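//------------------------------------------------------------------------
// gtNewSimdWithElementNode: Creates a new SIMD node that returns op1 with the element at
//    the constant index op2 replaced by the value op3.
//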
GenTree* Compiler::gtNewSimdWithElementNode(var_types type,
GenTree* op1,
GenTree* op2,
GenTree* op3,
CorInfoType simdBaseJitType,
unsigned simdSize,
bool isSimdAsHWIntrinsic)
{
NamedIntrinsic hwIntrinsicID = NI_Vector128_WithElement;
var_types simdBaseType = JitType2PreciseVarType(simdBaseJitType);
assert(varTypeIsArithmetic(simdBaseType));
assert(op2->IsCnsIntOrI());
ssize_t imm8 = op2->AsIntCon()->IconValue();
ssize_t count = simdSize / genTypeSize(simdBaseType);
assert((0 <= imm8) && (imm8 < count));
#if defined(TARGET_XARCH)
switch (simdBaseType)
{
// Using software fallback if simdBaseType is not supported by hardware
case TYP_BYTE:
case TYP_UBYTE:
case TYP_INT:
case TYP_UINT:
assert(compIsaSupportedDebugOnly(InstructionSet_SSE41));
break;
case TYP_LONG:
case TYP_ULONG:
assert(compIsaSupportedDebugOnly(InstructionSet_SSE41_X64));
break;
case TYP_DOUBLE:
case TYP_FLOAT:
case TYP_SHORT:
case TYP_USHORT:
assert(compIsaSupportedDebugOnly(InstructionSet_SSE2));
break;
default:
unreached();
}
if (simdSize == 32)
{
hwIntrinsicID = NI_Vector256_WithElement;
}
#elif defined(TARGET_ARM64)
switch (simdBaseType)
{
case TYP_LONG:
case TYP_ULONG:
case TYP_DOUBLE:
if (simdSize == 8)
{
return gtNewSimdHWIntrinsicNode(type, op3, NI_Vector64_Create, simdBaseJitType, simdSize,
isSimdAsHWIntrinsic);
}
break;
case TYP_FLOAT:
case TYP_BYTE:
case TYP_UBYTE:
case TYP_SHORT:
case TYP_USHORT:
case TYP_INT:
case TYP_UINT:
break;
default:
unreached();
}
hwIntrinsicID = NI_AdvSimd_Insert;
#else
#error Unsupported platform
#endif // !TARGET_XARCH && !TARGET_ARM64
return gtNewSimdHWIntrinsicNode(type, op1, op2, op3, hwIntrinsicID, simdBaseJitType, simdSize, isSimdAsHWIntrinsic);
}
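//------------------------------------------------------------------------
// gtNewSimdZeroNode: Creates a new SIMD node representing the all-zeros vector of the
//    given type and size.
//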
GenTree* Compiler::gtNewSimdZeroNode(var_types type,
CorInfoType simdBaseJitType,
unsigned simdSize,
bool isSimdAsHWIntrinsic)
{
assert(IsBaselineSimdIsaSupportedDebugOnly());
assert(varTypeIsSIMD(type));
assert(getSIMDTypeForSize(simdSize) == type);
var_types simdBaseType = JitType2PreciseVarType(simdBaseJitType);
assert(varTypeIsArithmetic(simdBaseType));
NamedIntrinsic intrinsic = NI_Illegal;
#if defined(TARGET_XARCH)
intrinsic = (simdSize == 32) ? NI_Vector256_get_Zero : NI_Vector128_get_Zero;
#elif defined(TARGET_ARM64)
intrinsic = (simdSize > 8) ? NI_Vector128_get_Zero : NI_Vector64_get_Zero;
#else
#error Unsupported platform
#endif // !TARGET_XARCH && !TARGET_ARM64
return gtNewSimdHWIntrinsicNode(type, intrinsic, simdBaseJitType, simdSize, isSimdAsHWIntrinsic);
}
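//------------------------------------------------------------------------
// gtNewScalarHWIntrinsicNode: Creates a new HW intrinsic node for a scalar intrinsic,
//    i.e. one that carries no SIMD base type or size (CORINFO_TYPE_UNDEF, size 0).
//    Overloads are provided for zero to three operands.
//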
GenTreeHWIntrinsic* Compiler::gtNewScalarHWIntrinsicNode(var_types type, NamedIntrinsic hwIntrinsicID)
{
return new (this, GT_HWINTRINSIC) GenTreeHWIntrinsic(type, getAllocator(CMK_ASTNode), hwIntrinsicID,
CORINFO_TYPE_UNDEF, 0, /* isSimdAsHWIntrinsic */ false);
}
GenTreeHWIntrinsic* Compiler::gtNewScalarHWIntrinsicNode(var_types type, GenTree* op1, NamedIntrinsic hwIntrinsicID)
{
SetOpLclRelatedToSIMDIntrinsic(op1);
return new (this, GT_HWINTRINSIC) GenTreeHWIntrinsic(type, getAllocator(CMK_ASTNode), hwIntrinsicID,
CORINFO_TYPE_UNDEF, 0, /* isSimdAsHWIntrinsic */ false, op1);
}
GenTreeHWIntrinsic* Compiler::gtNewScalarHWIntrinsicNode(var_types type,
GenTree* op1,
GenTree* op2,
NamedIntrinsic hwIntrinsicID)
{
SetOpLclRelatedToSIMDIntrinsic(op1);
SetOpLclRelatedToSIMDIntrinsic(op2);
return new (this, GT_HWINTRINSIC)
GenTreeHWIntrinsic(type, getAllocator(CMK_ASTNode), hwIntrinsicID, CORINFO_TYPE_UNDEF, 0,
/* isSimdAsHWIntrinsic */ false, op1, op2);
}
GenTreeHWIntrinsic* Compiler::gtNewScalarHWIntrinsicNode(
var_types type, GenTree* op1, GenTree* op2, GenTree* op3, NamedIntrinsic hwIntrinsicID)
{
SetOpLclRelatedToSIMDIntrinsic(op1);
SetOpLclRelatedToSIMDIntrinsic(op2);
SetOpLclRelatedToSIMDIntrinsic(op3);
return new (this, GT_HWINTRINSIC)
GenTreeHWIntrinsic(type, getAllocator(CMK_ASTNode), hwIntrinsicID, CORINFO_TYPE_UNDEF, 0,
/* isSimdAsHWIntrinsic */ false, op1, op2, op3);
}
// Returns true for the HW Intrinsic instructions that have MemoryLoad semantics, false otherwise
bool GenTreeHWIntrinsic::OperIsMemoryLoad() const
{
#if defined(TARGET_XARCH) || defined(TARGET_ARM64)
NamedIntrinsic intrinsicId = GetHWIntrinsicId();
HWIntrinsicCategory category = HWIntrinsicInfo::lookupCategory(intrinsicId);
if (category == HW_Category_MemoryLoad)
{
return true;
}
#ifdef TARGET_XARCH
else if (HWIntrinsicInfo::MaybeMemoryLoad(GetHWIntrinsicId()))
{
// Some intrinsics (without HW_Category_MemoryLoad) also have MemoryLoad semantics
// This is generally because they have both vector and pointer overloads, e.g.,
// * Vector128<byte> BroadcastScalarToVector128(Vector128<byte> value)
// * Vector128<byte> BroadcastScalarToVector128(byte* source)
// So, we need to check whether the argument's type is a memory reference or a Vector128
if ((category == HW_Category_SimpleSIMD) || (category == HW_Category_SIMDScalar))
{
assert(GetOperandCount() == 1);
switch (intrinsicId)
{
case NI_SSE41_ConvertToVector128Int16:
case NI_SSE41_ConvertToVector128Int32:
case NI_SSE41_ConvertToVector128Int64:
case NI_AVX2_BroadcastScalarToVector128:
case NI_AVX2_BroadcastScalarToVector256:
case NI_AVX2_ConvertToVector256Int16:
case NI_AVX2_ConvertToVector256Int32:
case NI_AVX2_ConvertToVector256Int64:
{
CorInfoType auxiliaryType = GetAuxiliaryJitType();
if (auxiliaryType == CORINFO_TYPE_PTR)
{
return true;
}
assert(auxiliaryType == CORINFO_TYPE_UNDEF);
return false;
}
default:
{
unreached();
}
}
}
else if (category == HW_Category_IMM)
{
// Do we have fewer than 3 operands?
if (GetOperandCount() < 3)
{
return false;
}
else if (HWIntrinsicInfo::isAVX2GatherIntrinsic(GetHWIntrinsicId()))
{
return true;
}
}
}
#endif // TARGET_XARCH
#endif // TARGET_XARCH || TARGET_ARM64
return false;
}
// Returns true for the HW Intrinsic instructions that have MemoryStore semantics, false otherwise
bool GenTreeHWIntrinsic::OperIsMemoryStore() const
{
#if defined(TARGET_XARCH) || defined(TARGET_ARM64)
HWIntrinsicCategory category = HWIntrinsicInfo::lookupCategory(GetHWIntrinsicId());
if (category == HW_Category_MemoryStore)
{
return true;
}
#ifdef TARGET_XARCH
else if (HWIntrinsicInfo::MaybeMemoryStore(GetHWIntrinsicId()) &&
(category == HW_Category_IMM || category == HW_Category_Scalar))
{
// Some intrinsics (without HW_Category_MemoryStore) also have MemoryStore semantics
// Bmi2/Bmi2.X64.MultiplyNoFlags may return the lower half of the result via an out argument:
// unsafe ulong MultiplyNoFlags(ulong left, ulong right, ulong* low)
//
// So, the 3-argument form is MemoryStore
if (GetOperandCount() == 3)
{
switch (GetHWIntrinsicId())
{
case NI_BMI2_MultiplyNoFlags:
case NI_BMI2_X64_MultiplyNoFlags:
return true;
default:
return false;
}
}
}
#endif // TARGET_XARCH
#endif // TARGET_XARCH || TARGET_ARM64
return false;
}
// Returns true for the HW Intrinsic instructions that have MemoryLoad or MemoryStore semantics, false otherwise
bool GenTreeHWIntrinsic::OperIsMemoryLoadOrStore() const
{
#if defined(TARGET_XARCH) || defined(TARGET_ARM64)
return OperIsMemoryLoad() || OperIsMemoryStore();
#else
return false;
#endif
}
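//------------------------------------------------------------------------
// GetHWIntrinsicId: Returns the intrinsic id of this node, asserting that the node's
//    operand count matches the count the intrinsic expects (when that count is known).
//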
NamedIntrinsic GenTreeHWIntrinsic::GetHWIntrinsicId() const
{
NamedIntrinsic id = gtHWIntrinsicId;
int numArgs = HWIntrinsicInfo::lookupNumArgs(id);
bool numArgsUnknown = numArgs < 0;
assert((static_cast<size_t>(numArgs) == GetOperandCount()) || numArgsUnknown);
return id;
}
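//------------------------------------------------------------------------
// SetHWIntrinsicId: Changes the intrinsic id of this node, asserting (in DEBUG) that the
//    new intrinsic expects the node's current operand count (when that count is known).
//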
void GenTreeHWIntrinsic::SetHWIntrinsicId(NamedIntrinsic intrinsicId)
{
#ifdef DEBUG
size_t oldOperandCount = GetOperandCount();
int newOperandCount = HWIntrinsicInfo::lookupNumArgs(intrinsicId);
bool newCountUnknown = newOperandCount < 0;
// We'll choose to trust the programmer here.
assert((oldOperandCount == static_cast<size_t>(newOperandCount)) || newCountUnknown);
#endif // DEBUG
gtHWIntrinsicId = intrinsicId;
}
// TODO-Review: why are layouts not compared here?
/* static */ bool GenTreeHWIntrinsic::Equals(GenTreeHWIntrinsic* op1, GenTreeHWIntrinsic* op2)
{
return (op1->TypeGet() == op2->TypeGet()) && (op1->GetHWIntrinsicId() == op2->GetHWIntrinsicId()) &&
(op1->GetSimdBaseType() == op2->GetSimdBaseType()) && (op1->GetSimdSize() == op2->GetSimdSize()) &&
(op1->GetAuxiliaryType() == op2->GetAuxiliaryType()) && (op1->GetOtherReg() == op2->GetOtherReg()) &&
OperandsAreEqual(op1, op2);
}
#endif // FEATURE_HW_INTRINSICS
//---------------------------------------------------------------------------------------
// gtNewMustThrowException:
// create a throw node (calling into JIT helper) that must be thrown.
// The result would be a comma node: COMMA(jithelperthrow(void), x) where x's type should be specified.
//
// Arguments
// helper - JIT helper ID
// type - return type of the node
//
// Return Value
// pointer to the throw node
//
GenTree* Compiler::gtNewMustThrowException(unsigned helper, var_types type, CORINFO_CLASS_HANDLE clsHnd)
{
GenTreeCall* node = gtNewHelperCallNode(helper, TYP_VOID);
node->gtCallMoreFlags |= GTF_CALL_M_DOES_NOT_RETURN;
if (type != TYP_VOID)
{
unsigned dummyTemp = lvaGrabTemp(true DEBUGARG("dummy temp of must-throw exception"));
if (type == TYP_STRUCT)
{
lvaSetStruct(dummyTemp, clsHnd, false);
type = lvaTable[dummyTemp].lvType; // struct type is normalized
}
else
{
lvaTable[dummyTemp].lvType = type;
}
GenTree* dummyNode = gtNewLclvNode(dummyTemp, type);
return gtNewOperNode(GT_COMMA, type, node, dummyNode);
}
return node;
}
//---------------------------------------------------------------------------------------
// InitializeStructReturnType:
// Initialize the Return Type Descriptor for a method that returns a struct type
//
// Arguments
// comp - Compiler Instance
// retClsHnd - VM handle to the struct type returned by the method
//
// Return Value
// None
//
void ReturnTypeDesc::InitializeStructReturnType(Compiler* comp,
CORINFO_CLASS_HANDLE retClsHnd,
CorInfoCallConvExtension callConv)
{
assert(!m_inited);
#if FEATURE_MULTIREG_RET
assert(retClsHnd != NO_CLASS_HANDLE);
unsigned structSize = comp->info.compCompHnd->getClassSize(retClsHnd);
Compiler::structPassingKind howToReturnStruct;
var_types returnType = comp->getReturnTypeForStruct(retClsHnd, callConv, &howToReturnStruct, structSize);
switch (howToReturnStruct)
{
case Compiler::SPK_EnclosingType:
m_isEnclosingType = true;
FALLTHROUGH;
case Compiler::SPK_PrimitiveType:
{
assert(returnType != TYP_UNKNOWN);
assert(returnType != TYP_STRUCT);
m_regType[0] = returnType;
break;
}
case Compiler::SPK_ByValueAsHfa:
{
assert(varTypeIsStruct(returnType));
var_types hfaType = comp->GetHfaType(retClsHnd);
// We should have an hfa struct type
assert(varTypeIsValidHfaType(hfaType));
// Note that the retail build issues a warning about a potential division by zero without this Max function
unsigned elemSize = Max((unsigned)1, EA_SIZE_IN_BYTES(emitActualTypeSize(hfaType)));
// The size of this struct should be evenly divisible by elemSize
assert((structSize % elemSize) == 0);
unsigned hfaCount = (structSize / elemSize);
for (unsigned i = 0; i < hfaCount; ++i)
{
m_regType[i] = hfaType;
}
if (comp->compFloatingPointUsed == false)
{
comp->compFloatingPointUsed = true;
}
break;
}
case Compiler::SPK_ByValue:
{
assert(varTypeIsStruct(returnType));
#ifdef UNIX_AMD64_ABI
SYSTEMV_AMD64_CORINFO_STRUCT_REG_PASSING_DESCRIPTOR structDesc;
comp->eeGetSystemVAmd64PassStructInRegisterDescriptor(retClsHnd, &structDesc);
assert(structDesc.passedInRegisters);
for (int i = 0; i < structDesc.eightByteCount; i++)
{
assert(i < MAX_RET_REG_COUNT);
m_regType[i] = comp->GetEightByteType(structDesc, i);
}
#elif defined(TARGET_ARM64)
// a non-HFA struct returned using two registers
//
assert((structSize > TARGET_POINTER_SIZE) && (structSize <= (2 * TARGET_POINTER_SIZE)));
BYTE gcPtrs[2] = {TYPE_GC_NONE, TYPE_GC_NONE};
comp->info.compCompHnd->getClassGClayout(retClsHnd, &gcPtrs[0]);
for (unsigned i = 0; i < 2; ++i)
{
m_regType[i] = comp->getJitGCType(gcPtrs[i]);
}
#elif defined(TARGET_X86)
// an 8-byte struct returned using two registers
assert(structSize == 8);
BYTE gcPtrs[2] = {TYPE_GC_NONE, TYPE_GC_NONE};
comp->info.compCompHnd->getClassGClayout(retClsHnd, &gcPtrs[0]);
for (unsigned i = 0; i < 2; ++i)
{
m_regType[i] = comp->getJitGCType(gcPtrs[i]);
}
#else // TARGET_XXX
// This target needs support here!
//
NYI("Unsupported TARGET returning a TYP_STRUCT in InitializeStructReturnType");
#endif // UNIX_AMD64_ABI
break; // for case SPK_ByValue
}
case Compiler::SPK_ByReference:
// We are returning using the return buffer argument
// There are no return registers
break;
default:
unreached(); // By the contract of getReturnTypeForStruct we should never get here.
} // end of switch (howToReturnStruct)
#endif // FEATURE_MULTIREG_RET
#ifdef DEBUG
m_inited = true;
#endif
}
//---------------------------------------------------------------------------------------
// InitializeLongReturnType:
// Initialize the Return Type Descriptor for a method that returns a TYP_LONG
//
void ReturnTypeDesc::InitializeLongReturnType()
{
assert(!m_inited);
#if defined(TARGET_X86) || defined(TARGET_ARM)
// Sets up a ReturnTypeDesc for returning a long using two registers
//
assert(MAX_RET_REG_COUNT >= 2);
m_regType[0] = TYP_INT;
m_regType[1] = TYP_INT;
#else // not (TARGET_X86 or TARGET_ARM)
m_regType[0] = TYP_LONG;
#endif // TARGET_X86 or TARGET_ARM
#ifdef DEBUG
m_inited = true;
#endif
}
//-------------------------------------------------------------------
// GetABIReturnReg: Return i'th return register as per target ABI
//
// Arguments:
// idx - Index of the return register.
// The first return register has an index of 0 and so on.
//
// Return Value:
// Returns i'th return register as per target ABI.
//
// Notes:
// x86 and ARM return long in multiple registers.
// ARM and ARM64 return HFA struct in multiple registers.
//
regNumber ReturnTypeDesc::GetABIReturnReg(unsigned idx) const
{
unsigned count = GetReturnRegCount();
assert(idx < count);
regNumber resultReg = REG_NA;
#ifdef UNIX_AMD64_ABI
var_types regType0 = GetReturnRegType(0);
if (idx == 0)
{
if (varTypeIsIntegralOrI(regType0))
{
resultReg = REG_INTRET;
}
else
{
noway_assert(varTypeUsesFloatReg(regType0));
resultReg = REG_FLOATRET;
}
}
else if (idx == 1)
{
var_types regType1 = GetReturnRegType(1);
if (varTypeIsIntegralOrI(regType1))
{
if (varTypeIsIntegralOrI(regType0))
{
resultReg = REG_INTRET_1;
}
else
{
resultReg = REG_INTRET;
}
}
else
{
noway_assert(varTypeUsesFloatReg(regType1));
if (varTypeUsesFloatReg(regType0))
{
resultReg = REG_FLOATRET_1;
}
else
{
resultReg = REG_FLOATRET;
}
}
}
#elif defined(TARGET_X86)
if (idx == 0)
{
resultReg = REG_LNGRET_LO;
}
else if (idx == 1)
{
resultReg = REG_LNGRET_HI;
}
#elif defined(TARGET_ARM)
var_types regType = GetReturnRegType(idx);
if (varTypeIsIntegralOrI(regType))
{
// Ints are returned in one return register.
// Longs are returned in two return registers.
if (idx == 0)
{
resultReg = REG_LNGRET_LO;
}
else if (idx == 1)
{
resultReg = REG_LNGRET_HI;
}
}
else
{
// Floats are returned in one return register (f0).
// Doubles are returned in one return register (d0).
// Structs are returned in four registers with HFAs.
assert(idx < MAX_RET_REG_COUNT); // Up to 4 return registers for HFA's
if (regType == TYP_DOUBLE)
{
resultReg = (regNumber)((unsigned)(REG_FLOATRET) + idx * 2); // d0, d1, d2 or d3
}
else
{
resultReg = (regNumber)((unsigned)(REG_FLOATRET) + idx); // f0, f1, f2 or f3
}
}
#elif defined(TARGET_ARM64)
var_types regType = GetReturnRegType(idx);
if (varTypeIsIntegralOrI(regType))
{
noway_assert(idx < 2); // Up to 2 return registers for 16-byte structs
resultReg = (idx == 0) ? REG_INTRET : REG_INTRET_1; // X0 or X1
}
else
{
noway_assert(idx < 4); // Up to 4 return registers for HFA's
resultReg = (regNumber)((unsigned)(REG_FLOATRET) + idx); // V0, V1, V2 or V3
}
#endif // TARGET_XXX
assert(resultReg != REG_NA);
return resultReg;
}
//--------------------------------------------------------------------------------
// GetABIReturnRegs: get the mask of return registers as per target arch ABI.
//
// Arguments:
// None
//
// Return Value:
// reg mask of return registers in which the return type is returned.
//
// Note:
// This routine can be used when the caller is not particular about the order
// of return registers and wants to know the set of return registers.
//
// static
regMaskTP ReturnTypeDesc::GetABIReturnRegs() const
{
regMaskTP resultMask = RBM_NONE;
unsigned count = GetReturnRegCount();
for (unsigned i = 0; i < count; ++i)
{
resultMask |= genRegMask(GetABIReturnReg(i));
}
return resultMask;
}
//------------------------------------------------------------------------
// The following functions manage the gtRsvdRegs set of temporary registers
// created by LSRA during code generation.
//------------------------------------------------------------------------
// AvailableTempRegCount: return the number of available temporary registers in the (optional) given set
// (typically, RBM_ALLINT or RBM_ALLFLOAT).
//
// Arguments:
// mask - (optional) Check for available temporary registers only in this set.
//
// Return Value:
// Count of available temporary registers in given set.
//
unsigned GenTree::AvailableTempRegCount(regMaskTP mask /* = (regMaskTP)-1 */) const
{
return genCountBits(gtRsvdRegs & mask);
}
//------------------------------------------------------------------------
// GetSingleTempReg: There is expected to be exactly one available temporary register
// in the given mask in the gtRsvdRegs set. Get that register. No future calls to get
// a temporary register are expected. Removes the register from the set, but only in
// DEBUG to avoid doing unnecessary work in non-DEBUG builds.
//
// Arguments:
// mask - (optional) Get an available temporary register only in this set.
//
// Return Value:
// Available temporary register in given mask.
//
regNumber GenTree::GetSingleTempReg(regMaskTP mask /* = (regMaskTP)-1 */)
{
regMaskTP availableSet = gtRsvdRegs & mask;
assert(genCountBits(availableSet) == 1);
regNumber tempReg = genRegNumFromMask(availableSet);
INDEBUG(gtRsvdRegs &= ~availableSet;) // Remove the register from the set, so it can't be used again.
return tempReg;
}
//------------------------------------------------------------------------
// ExtractTempReg: Find the lowest number temporary register from the gtRsvdRegs set
// that is also in the optional given mask (typically, RBM_ALLINT or RBM_ALLFLOAT),
// and return it. Remove this register from the temporary register set, so it won't
// be returned again.
//
// Arguments:
// mask - (optional) Extract an available temporary register only in this set.
//
// Return Value:
// Available temporary register in given mask.
//
regNumber GenTree::ExtractTempReg(regMaskTP mask /* = (regMaskTP)-1 */)
{
regMaskTP availableSet = gtRsvdRegs & mask;
assert(genCountBits(availableSet) >= 1);
regMaskTP tempRegMask = genFindLowestBit(availableSet);
gtRsvdRegs &= ~tempRegMask;
return genRegNumFromMask(tempRegMask);
}
//------------------------------------------------------------------------
// GetLclOffs: if `this` is a field or a field address, returns the offset
// of the field inside the struct; otherwise returns 0.
//
// Return Value:
// The offset value.
//
uint16_t GenTreeLclVarCommon::GetLclOffs() const
{
if (OperIsLocalField())
{
return AsLclFld()->GetLclOffs();
}
else
{
return 0;
}
}
#if defined(TARGET_XARCH) && defined(FEATURE_HW_INTRINSICS)
//------------------------------------------------------------------------
// GetResultOpNumForFMA: check if the result is written into one of the operands.
// In the case that none of the operands is overwritten, check if any of them is a last use.
//
// Return Value:
// The number of the operand that is overwritten or is a last use. 0 is the default value, meaning the result
// is written into a destination that is not one of the source operands and there is no last-use operand.
//
unsigned GenTreeHWIntrinsic::GetResultOpNumForFMA(GenTree* use, GenTree* op1, GenTree* op2, GenTree* op3)
{
// Only FMA intrinsic nodes should call into this function
assert(HWIntrinsicInfo::lookupIsa(gtHWIntrinsicId) == InstructionSet_FMA);
if (use != nullptr && use->OperIs(GT_STORE_LCL_VAR))
{
// For store_lcl_var, check if any op is overwritten
GenTreeLclVarCommon* overwritten = use->AsLclVarCommon();
unsigned overwrittenLclNum = overwritten->GetLclNum();
if (op1->IsLocal() && op1->AsLclVarCommon()->GetLclNum() == overwrittenLclNum)
{
return 1;
}
else if (op2->IsLocal() && op2->AsLclVarCommon()->GetLclNum() == overwrittenLclNum)
{
return 2;
}
else if (op3->IsLocal() && op3->AsLclVarCommon()->GetLclNum() == overwrittenLclNum)
{
return 3;
}
}
// If no overwritten op, check if there is any last use op
// https://github.com/dotnet/runtime/issues/62215
if (op1->OperIs(GT_LCL_VAR) && op1->IsLastUse(0))
return 1;
else if (op2->OperIs(GT_LCL_VAR) && op2->IsLastUse(0))
return 2;
else if (op3->OperIs(GT_LCL_VAR) && op3->IsLastUse(0))
return 3;
return 0;
}
#endif // TARGET_XARCH && FEATURE_HW_INTRINSICS
#ifdef TARGET_ARM
//------------------------------------------------------------------------
// IsOffsetMisaligned: check if the field needs special handling on arm.
//
// Return Value:
// true if it is a float field with a misaligned offset, false otherwise.
//
bool GenTreeLclFld::IsOffsetMisaligned() const
{
if (varTypeIsFloating(gtType))
{
return ((m_lclOffs % emitTypeSize(TYP_FLOAT)) != 0);
}
return false;
}
#endif // TARGET_ARM
bool GenTree::IsInvariant() const
{
return OperIsConst() || Compiler::impIsAddressInLocal(this);
}
//------------------------------------------------------------------------
// IsNeverNegative: returns true if the given tree is known to be never
// negative, i.e. the upper bit will always be zero.
// Only valid for integral types.
//
// Arguments:
// comp - Compiler object, needed for IntegralRange::ForNode
//
// Return Value:
// true if the given tree is known to be never negative
//
bool GenTree::IsNeverNegative(Compiler* comp) const
{
assert(varTypeIsIntegral(this));
if (IsIntegralConst())
{
return AsIntConCommon()->IntegralValue() >= 0;
}
// TODO-Casts: extend IntegralRange to handle constants
return IntegralRange::ForNode((GenTree*)this, comp).IsPositive();
}
// Licensed to the .NET Foundation under one or more agreements.
// The .NET Foundation licenses this file to you under the MIT license.
/*XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XX XX
XX GenTree XX
XX XX
XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
*/
#include "jitpch.h"
#include "hwintrinsic.h"
#include "simd.h"
#ifdef _MSC_VER
#pragma hdrstop
#endif
/*****************************************************************************/
const unsigned char GenTree::gtOperKindTable[] = {
#define GTNODE(en, st, cm, ok) ((ok)&GTK_MASK) + GTK_COMMUTE * cm,
#include "gtlist.h"
};
#ifdef DEBUG
const GenTreeDebugOperKind GenTree::gtDebugOperKindTable[] = {
#define GTNODE(en, st, cm, ok) static_cast<GenTreeDebugOperKind>((ok)&DBK_MASK),
#include "gtlist.h"
};
#endif // DEBUG
/*****************************************************************************
*
* The types of different GenTree nodes
*/
#ifdef DEBUG
#define INDENT_SIZE 3
//--------------------------------------------
//
// IndentStack: This struct is used, along with its related enums and strings,
// to control both the indentation and the printing of arcs.
//
// Notes:
// The mode of printing is set in the Constructor, using its 'compiler' argument.
// Currently it only prints arcs when fgOrder == fgOrderLinear.
// The type of arc to print is specified by the IndentInfo enum, and is controlled
// by the caller of the Push() method.
enum IndentChars
{
ICVertical,
ICBottom,
ICTop,
ICMiddle,
ICDash,
ICTerminal,
ICError,
IndentCharCount
};
// clang-format off
// Sets of strings for different dumping options vert bot top mid dash embedded terminal error
static const char* emptyIndents[IndentCharCount] = { " ", " ", " ", " ", " ", "", "?" };
static const char* asciiIndents[IndentCharCount] = { "|", "\\", "/", "+", "-", "*", "?" };
static const char* unicodeIndents[IndentCharCount] = { "\xe2\x94\x82", "\xe2\x94\x94", "\xe2\x94\x8c", "\xe2\x94\x9c", "\xe2\x94\x80", "\xe2\x96\x8c", "?" };
// clang-format on
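// Note: the escape sequences in 'unicodeIndents' are the UTF-8 encodings of the box-drawing glyphs
// U+2502 (vertical), U+2514 (bottom), U+250C (top), U+251C (middle), U+2500 (dash) and U+258C (terminal).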
typedef ArrayStack<Compiler::IndentInfo> IndentInfoStack;
struct IndentStack
{
IndentInfoStack stack;
const char** indents;
// Constructor for IndentStack. Uses 'compiler' to determine the mode of printing.
IndentStack(Compiler* compiler) : stack(compiler->getAllocator(CMK_DebugOnly))
{
if (compiler->asciiTrees)
{
indents = asciiIndents;
}
else
{
indents = unicodeIndents;
}
}
// Return the depth of the current indentation.
unsigned Depth()
{
return stack.Height();
}
// Push a new indentation onto the stack, of the given type.
void Push(Compiler::IndentInfo info)
{
stack.Push(info);
}
// Pop the most recent indentation type off the stack.
Compiler::IndentInfo Pop()
{
return stack.Pop();
}
// Print the current indentation and arcs.
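    // For example (illustrative), a stack holding {IIArc, IIArcBottom} prints "|  \--*" in ASCII
    // mode: a vertical bar for the outer arc, a bottom arc for the innermost one, then the terminal char.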
void print()
{
unsigned indentCount = Depth();
for (unsigned i = 0; i < indentCount; i++)
{
unsigned index = indentCount - 1 - i;
switch (stack.Top(index))
{
case Compiler::IndentInfo::IINone:
printf(" ");
break;
case Compiler::IndentInfo::IIArc:
if (index == 0)
{
printf("%s%s%s", indents[ICMiddle], indents[ICDash], indents[ICDash]);
}
else
{
printf("%s ", indents[ICVertical]);
}
break;
case Compiler::IndentInfo::IIArcBottom:
printf("%s%s%s", indents[ICBottom], indents[ICDash], indents[ICDash]);
break;
case Compiler::IndentInfo::IIArcTop:
printf("%s%s%s", indents[ICTop], indents[ICDash], indents[ICDash]);
break;
case Compiler::IndentInfo::IIError:
printf("%s%s%s", indents[ICError], indents[ICDash], indents[ICDash]);
break;
default:
unreached();
}
}
printf("%s", indents[ICTerminal]);
}
};
//------------------------------------------------------------------------
// printIndent: This is a static method which simply invokes the 'print'
// method on its 'indentStack' argument.
//
// Arguments:
// indentStack - specifies the information for the indentation & arcs to be printed
//
// Notes:
// This method exists to localize the checking for the case where indentStack is null.
static void printIndent(IndentStack* indentStack)
{
if (indentStack == nullptr)
{
return;
}
indentStack->print();
}
#endif
#if defined(DEBUG) || NODEBASH_STATS || MEASURE_NODE_SIZE || COUNT_AST_OPERS || DUMP_FLOWGRAPHS
static const char* opNames[] = {
#define GTNODE(en, st, cm, ok) #en,
#include "gtlist.h"
};
const char* GenTree::OpName(genTreeOps op)
{
assert((unsigned)op < ArrLen(opNames));
return opNames[op];
}
#endif
#if MEASURE_NODE_SIZE
static const char* opStructNames[] = {
#define GTNODE(en, st, cm, ok) #st,
#include "gtlist.h"
};
const char* GenTree::OpStructName(genTreeOps op)
{
assert((unsigned)op < ArrLen(opStructNames));
return opStructNames[op];
}
#endif
//
// We allocate tree nodes in 2 different sizes:
// - TREE_NODE_SZ_SMALL for most nodes
// - TREE_NODE_SZ_LARGE for the few nodes (such as calls) that have
// more fields and take up a lot more space.
//
/* GT_COUNT'th oper is overloaded as 'undefined oper', so allocate storage for GT_COUNT'th oper also */
/* static */
unsigned char GenTree::s_gtNodeSizes[GT_COUNT + 1];
#if NODEBASH_STATS || MEASURE_NODE_SIZE || COUNT_AST_OPERS
unsigned char GenTree::s_gtTrueSizes[GT_COUNT + 1]{
#define GTNODE(en, st, cm, ok) sizeof(st),
#include "gtlist.h"
};
#endif // NODEBASH_STATS || MEASURE_NODE_SIZE || COUNT_AST_OPERS
#if COUNT_AST_OPERS
unsigned GenTree::s_gtNodeCounts[GT_COUNT + 1] = {0};
#endif // COUNT_AST_OPERS
/* static */
void GenTree::InitNodeSize()
{
/* Set all sizes to 'small' first */
for (unsigned op = 0; op <= GT_COUNT; op++)
{
GenTree::s_gtNodeSizes[op] = TREE_NODE_SZ_SMALL;
}
// Now set all of the appropriate entries to 'large'
CLANG_FORMAT_COMMENT_ANCHOR;
// clang-format off
if (GlobalJitOptions::compFeatureHfa
#if defined(UNIX_AMD64_ABI)
|| true
#endif // defined(UNIX_AMD64_ABI)
)
{
        // On ARM32, ARM64 and System V, for struct returns
        // there is code that creates a GT_ASG tree with a CopyObj source.
        // CopyObj is a large node and GT_ASG is small, which would otherwise trigger an exception.
GenTree::s_gtNodeSizes[GT_ASG] = TREE_NODE_SZ_LARGE;
GenTree::s_gtNodeSizes[GT_RETURN] = TREE_NODE_SZ_LARGE;
}
GenTree::s_gtNodeSizes[GT_CALL] = TREE_NODE_SZ_LARGE;
GenTree::s_gtNodeSizes[GT_CAST] = TREE_NODE_SZ_LARGE;
GenTree::s_gtNodeSizes[GT_FTN_ADDR] = TREE_NODE_SZ_LARGE;
GenTree::s_gtNodeSizes[GT_BOX] = TREE_NODE_SZ_LARGE;
GenTree::s_gtNodeSizes[GT_INDEX] = TREE_NODE_SZ_LARGE;
GenTree::s_gtNodeSizes[GT_INDEX_ADDR] = TREE_NODE_SZ_LARGE;
GenTree::s_gtNodeSizes[GT_BOUNDS_CHECK] = TREE_NODE_SZ_SMALL;
GenTree::s_gtNodeSizes[GT_ARR_ELEM] = TREE_NODE_SZ_LARGE;
GenTree::s_gtNodeSizes[GT_ARR_INDEX] = TREE_NODE_SZ_LARGE;
GenTree::s_gtNodeSizes[GT_ARR_OFFSET] = TREE_NODE_SZ_LARGE;
GenTree::s_gtNodeSizes[GT_RET_EXPR] = TREE_NODE_SZ_LARGE;
GenTree::s_gtNodeSizes[GT_FIELD] = TREE_NODE_SZ_LARGE;
GenTree::s_gtNodeSizes[GT_CMPXCHG] = TREE_NODE_SZ_LARGE;
GenTree::s_gtNodeSizes[GT_QMARK] = TREE_NODE_SZ_LARGE;
GenTree::s_gtNodeSizes[GT_STORE_DYN_BLK] = TREE_NODE_SZ_LARGE;
GenTree::s_gtNodeSizes[GT_INTRINSIC] = TREE_NODE_SZ_LARGE;
GenTree::s_gtNodeSizes[GT_ALLOCOBJ] = TREE_NODE_SZ_LARGE;
#if USE_HELPERS_FOR_INT_DIV
GenTree::s_gtNodeSizes[GT_DIV] = TREE_NODE_SZ_LARGE;
GenTree::s_gtNodeSizes[GT_UDIV] = TREE_NODE_SZ_LARGE;
GenTree::s_gtNodeSizes[GT_MOD] = TREE_NODE_SZ_LARGE;
GenTree::s_gtNodeSizes[GT_UMOD] = TREE_NODE_SZ_LARGE;
#endif
#ifdef FEATURE_PUT_STRUCT_ARG_STK
// TODO-Throughput: This should not need to be a large node. The object info should be
// obtained from the child node.
GenTree::s_gtNodeSizes[GT_PUTARG_STK] = TREE_NODE_SZ_LARGE;
#if FEATURE_ARG_SPLIT
GenTree::s_gtNodeSizes[GT_PUTARG_SPLIT] = TREE_NODE_SZ_LARGE;
#endif // FEATURE_ARG_SPLIT
#endif // FEATURE_PUT_STRUCT_ARG_STK
assert(GenTree::s_gtNodeSizes[GT_RETURN] == GenTree::s_gtNodeSizes[GT_ASG]);
// This list of assertions should come to contain all GenTree subtypes that are declared
// "small".
assert(sizeof(GenTreeLclFld) <= GenTree::s_gtNodeSizes[GT_LCL_FLD]);
assert(sizeof(GenTreeLclVar) <= GenTree::s_gtNodeSizes[GT_LCL_VAR]);
static_assert_no_msg(sizeof(GenTree) <= TREE_NODE_SZ_SMALL);
static_assert_no_msg(sizeof(GenTreeUnOp) <= TREE_NODE_SZ_SMALL);
static_assert_no_msg(sizeof(GenTreeOp) <= TREE_NODE_SZ_SMALL);
static_assert_no_msg(sizeof(GenTreeVal) <= TREE_NODE_SZ_SMALL);
static_assert_no_msg(sizeof(GenTreeIntConCommon) <= TREE_NODE_SZ_SMALL);
static_assert_no_msg(sizeof(GenTreePhysReg) <= TREE_NODE_SZ_SMALL);
static_assert_no_msg(sizeof(GenTreeIntCon) <= TREE_NODE_SZ_SMALL);
static_assert_no_msg(sizeof(GenTreeLngCon) <= TREE_NODE_SZ_SMALL);
static_assert_no_msg(sizeof(GenTreeDblCon) <= TREE_NODE_SZ_SMALL);
static_assert_no_msg(sizeof(GenTreeStrCon) <= TREE_NODE_SZ_SMALL);
static_assert_no_msg(sizeof(GenTreeLclVarCommon) <= TREE_NODE_SZ_SMALL);
static_assert_no_msg(sizeof(GenTreeLclVar) <= TREE_NODE_SZ_SMALL);
static_assert_no_msg(sizeof(GenTreeLclFld) <= TREE_NODE_SZ_SMALL);
static_assert_no_msg(sizeof(GenTreeCC) <= TREE_NODE_SZ_SMALL);
static_assert_no_msg(sizeof(GenTreeCast) <= TREE_NODE_SZ_LARGE); // *** large node
static_assert_no_msg(sizeof(GenTreeBox) <= TREE_NODE_SZ_LARGE); // *** large node
static_assert_no_msg(sizeof(GenTreeField) <= TREE_NODE_SZ_LARGE); // *** large node
static_assert_no_msg(sizeof(GenTreeFieldList) <= TREE_NODE_SZ_SMALL);
static_assert_no_msg(sizeof(GenTreeColon) <= TREE_NODE_SZ_SMALL);
static_assert_no_msg(sizeof(GenTreeCall) <= TREE_NODE_SZ_LARGE); // *** large node
static_assert_no_msg(sizeof(GenTreeCmpXchg) <= TREE_NODE_SZ_LARGE); // *** large node
static_assert_no_msg(sizeof(GenTreeFptrVal) <= TREE_NODE_SZ_LARGE); // *** large node
static_assert_no_msg(sizeof(GenTreeQmark) <= TREE_NODE_SZ_LARGE); // *** large node
static_assert_no_msg(sizeof(GenTreeIntrinsic) <= TREE_NODE_SZ_LARGE); // *** large node
static_assert_no_msg(sizeof(GenTreeIndex) <= TREE_NODE_SZ_LARGE); // *** large node
static_assert_no_msg(sizeof(GenTreeIndexAddr) <= TREE_NODE_SZ_LARGE); // *** large node
static_assert_no_msg(sizeof(GenTreeArrLen) <= TREE_NODE_SZ_LARGE); // *** large node
static_assert_no_msg(sizeof(GenTreeBoundsChk) <= TREE_NODE_SZ_SMALL);
static_assert_no_msg(sizeof(GenTreeArrElem) <= TREE_NODE_SZ_LARGE); // *** large node
static_assert_no_msg(sizeof(GenTreeArrIndex) <= TREE_NODE_SZ_LARGE); // *** large node
static_assert_no_msg(sizeof(GenTreeArrOffs) <= TREE_NODE_SZ_LARGE); // *** large node
static_assert_no_msg(sizeof(GenTreeIndir) <= TREE_NODE_SZ_SMALL);
static_assert_no_msg(sizeof(GenTreeStoreInd) <= TREE_NODE_SZ_SMALL);
static_assert_no_msg(sizeof(GenTreeAddrMode) <= TREE_NODE_SZ_SMALL);
static_assert_no_msg(sizeof(GenTreeObj) <= TREE_NODE_SZ_SMALL);
static_assert_no_msg(sizeof(GenTreeBlk) <= TREE_NODE_SZ_SMALL);
static_assert_no_msg(sizeof(GenTreeStoreDynBlk) <= TREE_NODE_SZ_LARGE); // *** large node
static_assert_no_msg(sizeof(GenTreeRetExpr) <= TREE_NODE_SZ_LARGE); // *** large node
static_assert_no_msg(sizeof(GenTreeILOffset) <= TREE_NODE_SZ_SMALL);
static_assert_no_msg(sizeof(GenTreeClsVar) <= TREE_NODE_SZ_SMALL);
static_assert_no_msg(sizeof(GenTreeArgPlace) <= TREE_NODE_SZ_SMALL);
static_assert_no_msg(sizeof(GenTreePhiArg) <= TREE_NODE_SZ_SMALL);
static_assert_no_msg(sizeof(GenTreeAllocObj) <= TREE_NODE_SZ_LARGE); // *** large node
#ifndef FEATURE_PUT_STRUCT_ARG_STK
static_assert_no_msg(sizeof(GenTreePutArgStk) <= TREE_NODE_SZ_SMALL);
#else // FEATURE_PUT_STRUCT_ARG_STK
// TODO-Throughput: This should not need to be a large node. The object info should be
// obtained from the child node.
static_assert_no_msg(sizeof(GenTreePutArgStk) <= TREE_NODE_SZ_LARGE);
#if FEATURE_ARG_SPLIT
static_assert_no_msg(sizeof(GenTreePutArgSplit) <= TREE_NODE_SZ_LARGE);
#endif // FEATURE_ARG_SPLIT
#endif // FEATURE_PUT_STRUCT_ARG_STK
#ifdef FEATURE_SIMD
static_assert_no_msg(sizeof(GenTreeSIMD) <= TREE_NODE_SZ_SMALL);
#endif // FEATURE_SIMD
#ifdef FEATURE_HW_INTRINSICS
static_assert_no_msg(sizeof(GenTreeHWIntrinsic) <= TREE_NODE_SZ_SMALL);
#endif // FEATURE_HW_INTRINSICS
// clang-format on
}
size_t GenTree::GetNodeSize() const
{
return GenTree::s_gtNodeSizes[gtOper];
}
#ifdef DEBUG
bool GenTree::IsNodeProperlySized() const
{
size_t size;
if (gtDebugFlags & GTF_DEBUG_NODE_SMALL)
{
size = TREE_NODE_SZ_SMALL;
}
else
{
assert(gtDebugFlags & GTF_DEBUG_NODE_LARGE);
size = TREE_NODE_SZ_LARGE;
}
return GenTree::s_gtNodeSizes[gtOper] <= size;
}
#endif
//------------------------------------------------------------------------
// ReplaceWith: replace this with the src node. The source must be an isolated node
// and cannot be used after the replacement.
//
// Arguments:
// src - source tree, that replaces this.
// comp - the compiler instance to transfer annotations for arrays.
//
void GenTree::ReplaceWith(GenTree* src, Compiler* comp)
{
// The source may be big only if the target is also a big node
assert((gtDebugFlags & GTF_DEBUG_NODE_LARGE) || GenTree::s_gtNodeSizes[src->gtOper] == TREE_NODE_SZ_SMALL);
// The check is effective only if nodes have been already threaded.
assert((src->gtPrev == nullptr) && (src->gtNext == nullptr));
RecordOperBashing(OperGet(), src->OperGet()); // nop unless NODEBASH_STATS is enabled
GenTree* prev = gtPrev;
GenTree* next = gtNext;
// The VTable pointer is copied intentionally here
memcpy((void*)this, (void*)src, src->GetNodeSize());
this->gtPrev = prev;
this->gtNext = next;
#ifdef DEBUG
gtSeqNum = 0;
#endif
// Transfer any annotations.
if (src->OperGet() == GT_IND && src->gtFlags & GTF_IND_ARR_INDEX)
{
ArrayInfo arrInfo;
bool b = comp->GetArrayInfoMap()->Lookup(src, &arrInfo);
assert(b);
comp->GetArrayInfoMap()->Set(this, arrInfo);
}
DEBUG_DESTROY_NODE(src);
}
/*****************************************************************************
*
* When 'NODEBASH_STATS' is enabled in "jit.h" we record all instances of
* an existing GenTree node having its operator changed. This can be useful
* for two (related) things - to see what is being bashed (and what isn't),
* and to verify that the existing choices for what nodes are marked 'large'
* are reasonable (to minimize "wasted" space).
*
* And yes, the hash function / logic is simplistic, but it is conflict-free
* and transparent for what we need.
*/
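// Illustrative note: ReportOperBashing() below emits one row per recorded (old oper, new oper) pair,
// showing the true size of each node type and an 'X' marker when the new form is larger than the old one.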
#if NODEBASH_STATS
#define BASH_HASH_SIZE 211
inline unsigned hashme(genTreeOps op1, genTreeOps op2)
{
return ((op1 * 104729) ^ (op2 * 56569)) % BASH_HASH_SIZE;
}
struct BashHashDsc
{
unsigned __int32 bhFullHash; // the hash value (unique for all old->new pairs)
unsigned __int32 bhCount; // the same old->new bashings seen so far
unsigned __int8 bhOperOld; // original gtOper
unsigned __int8 bhOperNew; // new gtOper
};
static BashHashDsc BashHash[BASH_HASH_SIZE];
void GenTree::RecordOperBashing(genTreeOps operOld, genTreeOps operNew)
{
unsigned hash = hashme(operOld, operNew);
BashHashDsc* desc = BashHash + hash;
if (desc->bhFullHash != hash)
{
        noway_assert(desc->bhCount == 0); // if this ever fires, we need to fix the hash fn
desc->bhFullHash = hash;
}
desc->bhCount += 1;
desc->bhOperOld = operOld;
desc->bhOperNew = operNew;
}
void GenTree::ReportOperBashing(FILE* f)
{
unsigned total = 0;
fflush(f);
fprintf(f, "\n");
fprintf(f, "Bashed gtOper stats:\n");
fprintf(f, "\n");
fprintf(f, " Old operator New operator #bytes old->new Count\n");
fprintf(f, " ---------------------------------------------------------------\n");
for (unsigned h = 0; h < BASH_HASH_SIZE; h++)
{
unsigned count = BashHash[h].bhCount;
if (count == 0)
continue;
unsigned opOld = BashHash[h].bhOperOld;
unsigned opNew = BashHash[h].bhOperNew;
fprintf(f, " GT_%-13s -> GT_%-13s [size: %3u->%3u] %c %7u\n", OpName((genTreeOps)opOld),
OpName((genTreeOps)opNew), s_gtTrueSizes[opOld], s_gtTrueSizes[opNew],
(s_gtTrueSizes[opOld] < s_gtTrueSizes[opNew]) ? 'X' : ' ', count);
total += count;
}
fprintf(f, "\n");
fprintf(f, "Total bashings: %u\n", total);
fprintf(f, "\n");
fflush(f);
}
#endif // NODEBASH_STATS
/*****************************************************************************/
#if MEASURE_NODE_SIZE
void GenTree::DumpNodeSizes(FILE* fp)
{
// Dump the sizes of the various GenTree flavors
fprintf(fp, "Small tree node size = %zu bytes\n", TREE_NODE_SZ_SMALL);
fprintf(fp, "Large tree node size = %zu bytes\n", TREE_NODE_SZ_LARGE);
fprintf(fp, "\n");
// Verify that node sizes are set kosherly and dump sizes
for (unsigned op = GT_NONE + 1; op < GT_COUNT; op++)
{
unsigned needSize = s_gtTrueSizes[op];
unsigned nodeSize = s_gtNodeSizes[op];
const char* structNm = OpStructName((genTreeOps)op);
const char* operName = OpName((genTreeOps)op);
bool repeated = false;
// Have we seen this struct flavor before?
for (unsigned mop = GT_NONE + 1; mop < op; mop++)
{
if (strcmp(structNm, OpStructName((genTreeOps)mop)) == 0)
{
repeated = true;
break;
}
}
// Don't repeat the same GenTree flavor unless we have an error
if (!repeated || needSize > nodeSize)
{
unsigned sizeChar = '?';
if (nodeSize == TREE_NODE_SZ_SMALL)
sizeChar = 'S';
else if (nodeSize == TREE_NODE_SZ_LARGE)
sizeChar = 'L';
fprintf(fp, "GT_%-16s ... %-19s = %3u bytes (%c)", operName, structNm, needSize, sizeChar);
if (needSize > nodeSize)
{
fprintf(fp, " -- ERROR -- allocation is only %u bytes!", nodeSize);
}
else if (needSize <= TREE_NODE_SZ_SMALL && nodeSize == TREE_NODE_SZ_LARGE)
{
fprintf(fp, " ... could be small");
}
fprintf(fp, "\n");
}
}
}
#endif // MEASURE_NODE_SIZE
/*****************************************************************************
*
* Walk all basic blocks and call the given function pointer for all tree
* nodes contained therein.
*/
void Compiler::fgWalkAllTreesPre(fgWalkPreFn* visitor, void* pCallBackData)
{
for (BasicBlock* const block : Blocks())
{
for (Statement* const stmt : block->Statements())
{
fgWalkTreePre(stmt->GetRootNodePointer(), visitor, pCallBackData);
}
}
}
//-----------------------------------------------------------
// CopyReg: Copy the _gtRegNum/gtRegTag fields.
//
// Arguments:
// from - GenTree node from which to copy
//
// Return Value:
// None
void GenTree::CopyReg(GenTree* from)
{
_gtRegNum = from->_gtRegNum;
INDEBUG(gtRegTag = from->gtRegTag;)
// Also copy multi-reg state if this is a call node
if (IsCall())
{
assert(from->IsCall());
this->AsCall()->CopyOtherRegs(from->AsCall());
}
else if (IsCopyOrReload())
{
this->AsCopyOrReload()->CopyOtherRegs(from->AsCopyOrReload());
}
}
//------------------------------------------------------------------
// gtHasReg: Whether the node has been assigned a register by LSRA
//
// Arguments:
// comp - Compiler instance. Required for multi-reg lcl var; ignored otherwise.
//
// Return Value:
// Returns true if the node was assigned a register.
//
// In case of multi-reg call nodes, it is considered having a reg if regs are allocated for ALL its
// return values.
// REVIEW: why is this ALL and the other cases are ANY? Explain.
//
// In case of GT_COPY or GT_RELOAD of a multi-reg call, GT_COPY/GT_RELOAD is considered having a reg if it
// has a reg assigned to ANY of its positions.
//
// In case of multi-reg local vars, it is considered having a reg if it has a reg assigned for ANY
// of its positions.
//
bool GenTree::gtHasReg(Compiler* comp) const
{
bool hasReg = false;
if (IsMultiRegCall())
{
const GenTreeCall* call = AsCall();
const unsigned regCount = call->GetReturnTypeDesc()->GetReturnRegCount();
        // A multi-reg call node is said to have regs if it has
        // a reg assigned to each of its result registers.
for (unsigned i = 0; i < regCount; ++i)
{
hasReg = (call->GetRegNumByIdx(i) != REG_NA);
if (!hasReg)
{
break;
}
}
}
else if (IsCopyOrReloadOfMultiRegCall())
{
const GenTreeCopyOrReload* copyOrReload = AsCopyOrReload();
const GenTreeCall* call = copyOrReload->gtGetOp1()->AsCall();
const unsigned regCount = call->GetReturnTypeDesc()->GetReturnRegCount();
        // A multi-reg copy or reload node is said to have regs
        // if it has valid regs in any of the positions.
for (unsigned i = 0; i < regCount; ++i)
{
hasReg = (copyOrReload->GetRegNumByIdx(i) != REG_NA);
if (hasReg)
{
break;
}
}
}
else if (IsMultiRegLclVar())
{
assert(comp != nullptr);
const GenTreeLclVar* lclNode = AsLclVar();
const unsigned regCount = GetMultiRegCount(comp);
        // A multi-reg local var is said to have regs
        // if it has valid regs in any of the positions.
for (unsigned i = 0; i < regCount; i++)
{
hasReg = (lclNode->GetRegNumByIdx(i) != REG_NA);
if (hasReg)
{
break;
}
}
}
else
{
hasReg = (GetRegNum() != REG_NA);
}
return hasReg;
}
//-----------------------------------------------------------------------------
// GetRegisterDstCount: Get the number of registers defined by the node.
//
// Arguments:
// None
//
// Return Value:
// The number of registers that this node defines.
//
// Notes:
// This should not be called on a contained node.
// This does not look at the actual register assignments, if any, and so
// is valid after Lowering.
//
int GenTree::GetRegisterDstCount(Compiler* compiler) const
{
assert(!isContained());
if (!IsMultiRegNode())
{
return (IsValue()) ? 1 : 0;
}
else if (IsMultiRegCall())
{
return AsCall()->GetReturnTypeDesc()->GetReturnRegCount();
}
else if (IsCopyOrReload())
{
return gtGetOp1()->GetRegisterDstCount(compiler);
}
#if FEATURE_ARG_SPLIT
else if (OperIsPutArgSplit())
{
return (const_cast<GenTree*>(this))->AsPutArgSplit()->gtNumRegs;
}
#endif
#if !defined(TARGET_64BIT)
else if (OperIsMultiRegOp())
{
// A MultiRegOp is a GT_MUL_LONG, GT_PUTARG_REG, or GT_BITCAST.
// For the latter two (ARM-only), they only have multiple registers if they produce a long value
// (GT_MUL_LONG always produces a long value).
CLANG_FORMAT_COMMENT_ANCHOR;
#ifdef TARGET_ARM
return (TypeGet() == TYP_LONG) ? 2 : 1;
#else
assert(OperIs(GT_MUL_LONG));
return 2;
#endif
}
#endif
#ifdef FEATURE_HW_INTRINSICS
else if (OperIsHWIntrinsic())
{
assert(TypeIs(TYP_STRUCT));
const GenTreeHWIntrinsic* intrinsic = AsHWIntrinsic();
const NamedIntrinsic intrinsicId = intrinsic->GetHWIntrinsicId();
assert(HWIntrinsicInfo::IsMultiReg(intrinsicId));
return HWIntrinsicInfo::GetMultiRegCount(intrinsicId);
}
#endif // FEATURE_HW_INTRINSICS
if (OperIsScalarLocal())
{
return AsLclVar()->GetFieldCount(compiler);
}
assert(!"Unexpected multi-reg node");
return 0;
}
//-----------------------------------------------------------------------------------
// IsMultiRegNode: whether this node returns its value in more than one register
//
// Arguments:
// None
//
// Return Value:
// Returns true if this GenTree is a multi-reg node.
//
// Notes:
// All targets that support multi-reg ops of any kind also support multi-reg return
// values for calls. Should that change with a future target, this method will need
// to change accordingly.
//
bool GenTree::IsMultiRegNode() const
{
#if FEATURE_MULTIREG_RET
if (IsMultiRegCall())
{
return true;
}
#if FEATURE_ARG_SPLIT
if (OperIsPutArgSplit())
{
return true;
}
#endif
#if !defined(TARGET_64BIT)
if (OperIsMultiRegOp())
{
return true;
}
#endif
if (OperIs(GT_COPY, GT_RELOAD))
{
return true;
}
#endif // FEATURE_MULTIREG_RET
#ifdef FEATURE_HW_INTRINSICS
if (OperIsHWIntrinsic())
{
return HWIntrinsicInfo::IsMultiReg(AsHWIntrinsic()->GetHWIntrinsicId());
}
#endif // FEATURE_HW_INTRINSICS
if (IsMultiRegLclVar())
{
return true;
}
return false;
}
//-----------------------------------------------------------------------------------
// GetMultiRegCount: Return the register count for a multi-reg node.
//
// Arguments:
// comp - Compiler instance. Required for MultiRegLclVar, unused otherwise.
//
// Return Value:
// Returns the number of registers defined by this node.
//
unsigned GenTree::GetMultiRegCount(Compiler* comp) const
{
#if FEATURE_MULTIREG_RET
if (IsMultiRegCall())
{
return AsCall()->GetReturnTypeDesc()->GetReturnRegCount();
}
#if FEATURE_ARG_SPLIT
if (OperIsPutArgSplit())
{
return AsPutArgSplit()->gtNumRegs;
}
#endif
#if !defined(TARGET_64BIT)
if (OperIsMultiRegOp())
{
return AsMultiRegOp()->GetRegCount();
}
#endif
if (OperIs(GT_COPY, GT_RELOAD))
{
return AsCopyOrReload()->GetRegCount();
}
#endif // FEATURE_MULTIREG_RET
#ifdef FEATURE_HW_INTRINSICS
if (OperIsHWIntrinsic())
{
return HWIntrinsicInfo::GetMultiRegCount(AsHWIntrinsic()->GetHWIntrinsicId());
}
#endif // FEATURE_HW_INTRINSICS
if (IsMultiRegLclVar())
{
assert(comp != nullptr);
return AsLclVar()->GetFieldCount(comp);
}
assert(!"GetMultiRegCount called with non-multireg node");
return 1;
}
//---------------------------------------------------------------
// gtGetContainedRegMask: Get the reg mask of the node including
// contained nodes (recursive).
//
// Arguments:
// None
//
// Return Value:
// Reg Mask of GenTree node.
//
regMaskTP GenTree::gtGetContainedRegMask()
{
if (!isContained())
{
return gtGetRegMask();
}
regMaskTP mask = 0;
for (GenTree* operand : Operands())
{
mask |= operand->gtGetContainedRegMask();
}
return mask;
}
//---------------------------------------------------------------
// gtGetRegMask: Get the reg mask of the node.
//
// Arguments:
// None
//
// Return Value:
// Reg Mask of GenTree node.
//
regMaskTP GenTree::gtGetRegMask() const
{
regMaskTP resultMask;
if (IsMultiRegCall())
{
resultMask = genRegMask(GetRegNum());
resultMask |= AsCall()->GetOtherRegMask();
}
else if (IsCopyOrReloadOfMultiRegCall())
{
// A multi-reg copy or reload, will have valid regs for only those
// positions that need to be copied or reloaded. Hence we need
// to consider only those registers for computing reg mask.
const GenTreeCopyOrReload* copyOrReload = AsCopyOrReload();
const GenTreeCall* call = copyOrReload->gtGetOp1()->AsCall();
const unsigned regCount = call->GetReturnTypeDesc()->GetReturnRegCount();
resultMask = RBM_NONE;
for (unsigned i = 0; i < regCount; ++i)
{
regNumber reg = copyOrReload->GetRegNumByIdx(i);
if (reg != REG_NA)
{
resultMask |= genRegMask(reg);
}
}
}
#if FEATURE_ARG_SPLIT
else if (compFeatureArgSplit() && OperIsPutArgSplit())
{
const GenTreePutArgSplit* splitArg = AsPutArgSplit();
const unsigned regCount = splitArg->gtNumRegs;
resultMask = RBM_NONE;
for (unsigned i = 0; i < regCount; ++i)
{
regNumber reg = splitArg->GetRegNumByIdx(i);
assert(reg != REG_NA);
resultMask |= genRegMask(reg);
}
}
#endif // FEATURE_ARG_SPLIT
else
{
resultMask = genRegMask(GetRegNum());
}
return resultMask;
}
void GenTreeFieldList::AddField(Compiler* compiler, GenTree* node, unsigned offset, var_types type)
{
m_uses.AddUse(new (compiler, CMK_ASTNode) Use(node, offset, type));
gtFlags |= node->gtFlags & GTF_ALL_EFFECT;
}
void GenTreeFieldList::AddFieldLIR(Compiler* compiler, GenTree* node, unsigned offset, var_types type)
{
m_uses.AddUse(new (compiler, CMK_ASTNode) Use(node, offset, type));
}
void GenTreeFieldList::InsertField(Compiler* compiler, Use* insertAfter, GenTree* node, unsigned offset, var_types type)
{
m_uses.InsertUse(insertAfter, new (compiler, CMK_ASTNode) Use(node, offset, type));
gtFlags |= node->gtFlags & GTF_ALL_EFFECT;
}
void GenTreeFieldList::InsertFieldLIR(
Compiler* compiler, Use* insertAfter, GenTree* node, unsigned offset, var_types type)
{
m_uses.InsertUse(insertAfter, new (compiler, CMK_ASTNode) Use(node, offset, type));
}
//---------------------------------------------------------------
// GetOtherRegMask: Get the reg mask of gtOtherRegs of call node
//
// Arguments:
// None
//
// Return Value:
// Reg mask of gtOtherRegs of call node.
//
regMaskTP GenTreeCall::GetOtherRegMask() const
{
regMaskTP resultMask = RBM_NONE;
#if FEATURE_MULTIREG_RET
for (unsigned i = 0; i < MAX_RET_REG_COUNT - 1; ++i)
{
if (gtOtherRegs[i] != REG_NA)
{
resultMask |= genRegMask((regNumber)gtOtherRegs[i]);
continue;
}
break;
}
#endif
return resultMask;
}
//-------------------------------------------------------------------------
// IsPure:
// Returns true if this call is pure. For now, this uses the same
//    definition of "pure" as that used by HelperCallProperties: a
// pure call does not read or write any aliased (e.g. heap) memory or
// have other global side effects (e.g. class constructors, finalizers),
// but is allowed to throw an exception.
//
// NOTE: this call currently only returns true if the call target is a
// helper method that is known to be pure. No other analysis is
// performed.
//
// Arguments:
//    compiler - the compiler context.
//
// Returns:
// True if the call is pure; false otherwise.
//
bool GenTreeCall::IsPure(Compiler* compiler) const
{
return (gtCallType == CT_HELPER) &&
compiler->s_helperCallProperties.IsPure(compiler->eeGetHelperNum(gtCallMethHnd));
}
//-------------------------------------------------------------------------
// HasSideEffects:
// Returns true if this call has any side effects. All non-helpers are considered to have side-effects. Only helpers
// that do not mutate the heap, do not run constructors, may not throw, and are either a) pure or b) non-finalizing
// allocation functions are considered side-effect-free.
//
// Arguments:
// compiler - the compiler instance
// ignoreExceptions - when `true`, ignores exception side effects
// ignoreCctors - when `true`, ignores class constructor side effects
//
// Return Value:
// true if this call has any side-effects; false otherwise.
bool GenTreeCall::HasSideEffects(Compiler* compiler, bool ignoreExceptions, bool ignoreCctors) const
{
// Generally all GT_CALL nodes are considered to have side-effects, but we may have extra information about helper
// calls that can prove them side-effect-free.
if (gtCallType != CT_HELPER)
{
return true;
}
CorInfoHelpFunc helper = compiler->eeGetHelperNum(gtCallMethHnd);
HelperCallProperties& helperProperties = compiler->s_helperCallProperties;
// We definitely care about the side effects if MutatesHeap is true
if (helperProperties.MutatesHeap(helper))
{
return true;
}
// Unless we have been instructed to ignore cctors (CSE, for example, ignores cctors), consider them side effects.
if (!ignoreCctors && helperProperties.MayRunCctor(helper))
{
return true;
}
// If we also care about exceptions then check if the helper can throw
if (!ignoreExceptions && !helperProperties.NoThrow(helper))
{
return true;
}
// If this is not a Pure helper call or an allocator (that will not need to run a finalizer)
// then this call has side effects.
return !helperProperties.IsPure(helper) &&
(!helperProperties.IsAllocator(helper) || ((gtCallMoreFlags & GTF_CALL_M_ALLOC_SIDE_EFFECTS) != 0));
}
//-------------------------------------------------------------------------
// HasNonStandardAddedArgs: Return true if the method has non-standard args added to the call
// argument list during argument morphing (fgMorphArgs), e.g., passed in R10 or R11 on AMD64.
// See also GetNonStandardAddedArgCount().
//
// Arguments:
// compiler - the compiler instance
//
// Return Value:
// true if there are any such args, false otherwise.
//
bool GenTreeCall::HasNonStandardAddedArgs(Compiler* compiler) const
{
return GetNonStandardAddedArgCount(compiler) != 0;
}
//-------------------------------------------------------------------------
// GetNonStandardAddedArgCount: Get the count of non-standard arguments that have been added
// during call argument morphing (fgMorphArgs). Do not count non-standard args that are already
// counted in the argument list prior to morphing.
//
// This function is used to help map the caller and callee arguments during tail call setup.
//
// Arguments:
// compiler - the compiler instance
//
// Return Value:
// The count of args, as described.
//
// Notes:
// It would be more general to have fgMorphArgs set a bit on the call node when such
// args are added to a call, and a bit on each such arg, and then have this code loop
// over the call args when the special call bit is set, counting the args with the special
// arg bit. This seems pretty heavyweight, though. Instead, this logic needs to be kept
// in sync with fgMorphArgs.
//
int GenTreeCall::GetNonStandardAddedArgCount(Compiler* compiler) const
{
if (IsUnmanaged() && !compiler->opts.ShouldUsePInvokeHelpers())
{
// R11 = PInvoke cookie param
return 1;
}
else if (IsVirtualStub())
{
// R11 = Virtual stub param
return 1;
}
else if ((gtCallType == CT_INDIRECT) && (gtCallCookie != nullptr))
{
// R10 = PInvoke target param
// R11 = PInvoke cookie param
return 2;
}
return 0;
}
//-------------------------------------------------------------------------
// TreatAsHasRetBufArg:
//
// Arguments:
// compiler, the compiler instance so that we can call eeGetHelperNum
//
// Return Value:
// Returns true if we treat the call as if it has a retBuf argument
// This method may actually have a retBuf argument
// or it could be a JIT helper that we are still transforming during
// the importer phase.
//
// Notes:
// On ARM64 marking the method with the GTF_CALL_M_RETBUFFARG flag
// will make HasRetBufArg() return true, but will also force the
// use of register x8 to pass the RetBuf argument.
//
// These two Jit Helpers that we handle here by returning true
// aren't actually defined to return a struct, so they don't expect
// their RetBuf to be passed in x8, instead they expect it in x0.
//
bool GenTreeCall::TreatAsHasRetBufArg(Compiler* compiler) const
{
if (HasRetBufArg())
{
return true;
}
else
{
// If we see a Jit helper call that returns a TYP_STRUCT we will
// transform it as if it has a Return Buffer Argument
//
if (IsHelperCall() && (gtReturnType == TYP_STRUCT))
{
// There are two possible helper calls that use this path:
// CORINFO_HELP_GETFIELDSTRUCT and CORINFO_HELP_UNBOX_NULLABLE
//
CorInfoHelpFunc helpFunc = compiler->eeGetHelperNum(gtCallMethHnd);
if (helpFunc == CORINFO_HELP_GETFIELDSTRUCT)
{
return true;
}
else if (helpFunc == CORINFO_HELP_UNBOX_NULLABLE)
{
return true;
}
else
{
assert(!"Unexpected JIT helper in TreatAsHasRetBufArg");
}
}
}
return false;
}
//-------------------------------------------------------------------------
// IsHelperCall: Determine if this GT_CALL node is a specific helper call.
//
// Arguments:
// compiler - the compiler instance so that we can call eeFindHelper
//
// Return Value:
// Returns true if this GT_CALL node is a call to the specified helper.
//
bool GenTreeCall::IsHelperCall(Compiler* compiler, unsigned helper) const
{
return IsHelperCall(compiler->eeFindHelper(helper));
}
//------------------------------------------------------------------------
// GenTreeCall::ReplaceCallOperand:
//    Replaces a given operand of a call node and updates the call
// argument table if necessary.
//
// Arguments:
// useEdge - the use edge that points to the operand to be replaced.
// replacement - the replacement node.
//
void GenTreeCall::ReplaceCallOperand(GenTree** useEdge, GenTree* replacement)
{
assert(useEdge != nullptr);
assert(replacement != nullptr);
assert(TryGetUse(*useEdge, &useEdge));
GenTree* originalOperand = *useEdge;
*useEdge = replacement;
const bool isArgument =
(replacement != gtControlExpr) &&
((gtCallType != CT_INDIRECT) || ((replacement != gtCallCookie) && (replacement != gtCallAddr)));
if (isArgument)
{
if ((originalOperand->gtFlags & GTF_LATE_ARG) != 0)
{
replacement->gtFlags |= GTF_LATE_ARG;
}
else
{
assert((replacement->gtFlags & GTF_LATE_ARG) == 0);
fgArgTabEntry* fp = Compiler::gtArgEntryByNode(this, replacement);
assert(fp->GetNode() == replacement);
}
}
}
//-------------------------------------------------------------------------
// AreArgsComplete: Determine if this GT_CALL node's arguments have been processed.
//
// Return Value:
// Returns true if fgMorphArgs has processed the arguments.
//
bool GenTreeCall::AreArgsComplete() const
{
if (fgArgInfo == nullptr)
{
return false;
}
if (fgArgInfo->AreArgsComplete())
{
assert((gtCallLateArgs != nullptr) || !fgArgInfo->HasRegArgs());
return true;
}
#if defined(FEATURE_FASTTAILCALL)
// If we have FEATURE_FASTTAILCALL, 'fgCanFastTailCall()' can call 'fgInitArgInfo()', and in that
// scenario it is valid to have 'fgArgInfo' be non-null when 'fgMorphArgs()' first queries this,
// when it hasn't yet morphed the arguments.
#else
assert(gtCallArgs == nullptr);
#endif
return false;
}
//--------------------------------------------------------------------------
// Equals: Check if 2 CALL nodes are equal.
//
// Arguments:
// c1 - The first call node
// c2 - The second call node
//
// Return Value:
// true if the 2 CALL nodes have the same type and operands
//
bool GenTreeCall::Equals(GenTreeCall* c1, GenTreeCall* c2)
{
assert(c1->OperGet() == c2->OperGet());
if (c1->TypeGet() != c2->TypeGet())
{
return false;
}
if (c1->gtCallType != c2->gtCallType)
{
return false;
}
if (c1->gtCallType != CT_INDIRECT)
{
if (c1->gtCallMethHnd != c2->gtCallMethHnd)
{
return false;
}
#ifdef FEATURE_READYTORUN
if (c1->gtEntryPoint.addr != c2->gtEntryPoint.addr)
{
return false;
}
#endif
}
else
{
if (!Compare(c1->gtCallAddr, c2->gtCallAddr))
{
return false;
}
}
if ((c1->gtCallThisArg != nullptr) != (c2->gtCallThisArg != nullptr))
{
return false;
}
if ((c1->gtCallThisArg != nullptr) && !Compare(c1->gtCallThisArg->GetNode(), c2->gtCallThisArg->GetNode()))
{
return false;
}
GenTreeCall::UseIterator i1 = c1->Args().begin();
GenTreeCall::UseIterator end1 = c1->Args().end();
GenTreeCall::UseIterator i2 = c2->Args().begin();
GenTreeCall::UseIterator end2 = c2->Args().end();
for (; (i1 != end1) && (i2 != end2); ++i1, ++i2)
{
if (!Compare(i1->GetNode(), i2->GetNode()))
{
return false;
}
}
if ((i1 != end1) || (i2 != end2))
{
return false;
}
i1 = c1->LateArgs().begin();
end1 = c1->LateArgs().end();
i2 = c2->LateArgs().begin();
end2 = c2->LateArgs().end();
for (; (i1 != end1) && (i2 != end2); ++i1, ++i2)
{
if (!Compare(i1->GetNode(), i2->GetNode()))
{
return false;
}
}
if ((i1 != end1) || (i2 != end2))
{
return false;
}
if (!Compare(c1->gtControlExpr, c2->gtControlExpr))
{
return false;
}
return true;
}
//--------------------------------------------------------------------------
// ResetArgInfo: The argument info needs to be reset so it can be recomputed based on some change
// in conditions, such as changing the return type of a call due to giving up on doing a tailcall.
// If there is no fgArgInfo computed yet for this call, then there is nothing to reset.
//
void GenTreeCall::ResetArgInfo()
{
if (fgArgInfo == nullptr)
{
return;
}
// We would like to just set `fgArgInfo = nullptr`. But `fgInitArgInfo()` not
// only sets up fgArgInfo, it also adds non-standard args to the IR, and we need
// to remove that extra IR so it doesn't get added again.
//
unsigned argNum = 0;
if (gtCallThisArg != nullptr)
{
argNum++;
}
    Use** link = &gtCallArgs;
while ((*link) != nullptr)
{
const fgArgTabEntry* entry = fgArgInfo->GetArgEntry(argNum);
if (entry->isNonStandard() && entry->isNonStandardArgAddedLate())
{
JITDUMP("Removing non-standarg arg %s [%06u] to prepare for re-morphing call [%06u]\n",
getNonStandardArgKindName(entry->nonStandardArgKind), Compiler::dspTreeID((*link)->GetNode()),
gtTreeID);
*link = (*link)->GetNext();
}
else
{
link = &(*link)->NextRef();
}
argNum++;
}
fgArgInfo = nullptr;
}
#if !defined(FEATURE_PUT_STRUCT_ARG_STK)
unsigned GenTreePutArgStk::GetStackByteSize() const
{
return genTypeSize(genActualType(gtOp1->gtType));
}
#endif // !defined(FEATURE_PUT_STRUCT_ARG_STK)
/*****************************************************************************
*
* Returns non-zero if the two trees are identical.
*/
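// Note: when 'swapOK' is true, the operands of a commutative operator may match in either order,
// e.g. ADD(a, b) can compare equal to ADD(b, a), provided none of the operands have side effects.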
bool GenTree::Compare(GenTree* op1, GenTree* op2, bool swapOK)
{
genTreeOps oper;
unsigned kind;
// printf("tree1:\n"); gtDispTree(op1);
// printf("tree2:\n"); gtDispTree(op2);
AGAIN:
if (op1 == nullptr)
{
return (op2 == nullptr);
}
if (op2 == nullptr)
{
return false;
}
if (op1 == op2)
{
return true;
}
oper = op1->OperGet();
/* The operators must be equal */
if (oper != op2->gtOper)
{
return false;
}
/* The types must be equal */
if (op1->gtType != op2->gtType)
{
return false;
}
/* Overflow must be equal */
if (op1->gtOverflowEx() != op2->gtOverflowEx())
{
return false;
}
/* Sensible flags must be equal */
if ((op1->gtFlags & (GTF_UNSIGNED)) != (op2->gtFlags & (GTF_UNSIGNED)))
{
return false;
}
/* Figure out what kind of nodes we're comparing */
kind = op1->OperKind();
/* Is this a constant node? */
if (op1->OperIsConst())
{
switch (oper)
{
case GT_CNS_INT:
if (op1->AsIntCon()->gtIconVal == op2->AsIntCon()->gtIconVal)
{
return true;
}
break;
case GT_CNS_STR:
if ((op1->AsStrCon()->gtSconCPX == op2->AsStrCon()->gtSconCPX) &&
(op1->AsStrCon()->gtScpHnd == op2->AsStrCon()->gtScpHnd))
{
return true;
}
break;
#if 0
// TODO-CQ: Enable this in the future
case GT_CNS_LNG:
if (op1->AsLngCon()->gtLconVal == op2->AsLngCon()->gtLconVal)
return true;
break;
case GT_CNS_DBL:
if (op1->AsDblCon()->gtDconVal == op2->AsDblCon()->gtDconVal)
return true;
break;
#endif
default:
break;
}
return false;
}
/* Is this a leaf node? */
if (kind & GTK_LEAF)
{
switch (oper)
{
case GT_LCL_VAR:
if (op1->AsLclVarCommon()->GetLclNum() != op2->AsLclVarCommon()->GetLclNum())
{
break;
}
return true;
case GT_LCL_FLD:
if ((op1->AsLclFld()->GetLclNum() != op2->AsLclFld()->GetLclNum()) ||
(op1->AsLclFld()->GetLclOffs() != op2->AsLclFld()->GetLclOffs()))
{
break;
}
return true;
case GT_CLS_VAR:
if (op1->AsClsVar()->gtClsVarHnd != op2->AsClsVar()->gtClsVarHnd)
{
break;
}
return true;
case GT_LABEL:
return true;
case GT_ARGPLACE:
if ((op1->gtType == TYP_STRUCT) &&
(op1->AsArgPlace()->gtArgPlaceClsHnd != op2->AsArgPlace()->gtArgPlaceClsHnd))
{
break;
}
return true;
default:
break;
}
return false;
}
/* Is it a 'simple' unary/binary operator? */
if (kind & GTK_UNOP)
{
if (IsExOp(kind))
{
            // ExOp operators extend unary operators with extra, non-GenTree* members. In many cases,
// these should be included in the comparison.
switch (oper)
{
case GT_ARR_LENGTH:
if (op1->AsArrLen()->ArrLenOffset() != op2->AsArrLen()->ArrLenOffset())
{
return false;
}
break;
case GT_CAST:
if (op1->AsCast()->gtCastType != op2->AsCast()->gtCastType)
{
return false;
}
break;
case GT_BLK:
case GT_OBJ:
if (op1->AsBlk()->GetLayout() != op2->AsBlk()->GetLayout())
{
return false;
}
break;
case GT_FIELD:
if (op1->AsField()->gtFldHnd != op2->AsField()->gtFldHnd)
{
return false;
}
break;
// For the ones below no extra argument matters for comparison.
case GT_BOX:
case GT_RUNTIMELOOKUP:
break;
default:
assert(!"unexpected unary ExOp operator");
}
}
return Compare(op1->AsOp()->gtOp1, op2->AsOp()->gtOp1);
}
if (kind & GTK_BINOP)
{
if (IsExOp(kind))
{
            // ExOp operators extend binary operators with extra, non-GenTree* members. In many cases,
            // these should be included in the comparison.
switch (oper)
{
case GT_INTRINSIC:
if (op1->AsIntrinsic()->gtIntrinsicName != op2->AsIntrinsic()->gtIntrinsicName)
{
return false;
}
break;
case GT_LEA:
if (op1->AsAddrMode()->gtScale != op2->AsAddrMode()->gtScale)
{
return false;
}
if (op1->AsAddrMode()->Offset() != op2->AsAddrMode()->Offset())
{
return false;
}
break;
case GT_BOUNDS_CHECK:
if (op1->AsBoundsChk()->gtThrowKind != op2->AsBoundsChk()->gtThrowKind)
{
return false;
}
break;
case GT_INDEX:
if (op1->AsIndex()->gtIndElemSize != op2->AsIndex()->gtIndElemSize)
{
return false;
}
break;
case GT_INDEX_ADDR:
if (op1->AsIndexAddr()->gtElemSize != op2->AsIndexAddr()->gtElemSize)
{
return false;
}
break;
// For the ones below no extra argument matters for comparison.
case GT_QMARK:
break;
default:
assert(!"unexpected binary ExOp operator");
}
}
if (op1->AsOp()->gtOp2)
{
if (!Compare(op1->AsOp()->gtOp1, op2->AsOp()->gtOp1, swapOK))
{
if (swapOK && OperIsCommutative(oper) &&
((op1->AsOp()->gtOp1->gtFlags | op1->AsOp()->gtOp2->gtFlags | op2->AsOp()->gtOp1->gtFlags |
op2->AsOp()->gtOp2->gtFlags) &
GTF_ALL_EFFECT) == 0)
{
if (Compare(op1->AsOp()->gtOp1, op2->AsOp()->gtOp2, swapOK))
{
op1 = op1->AsOp()->gtOp2;
op2 = op2->AsOp()->gtOp1;
goto AGAIN;
}
}
return false;
}
op1 = op1->AsOp()->gtOp2;
op2 = op2->AsOp()->gtOp2;
goto AGAIN;
}
else
{
op1 = op1->AsOp()->gtOp1;
op2 = op2->AsOp()->gtOp1;
if (!op1)
{
return (op2 == nullptr);
}
if (!op2)
{
return false;
}
goto AGAIN;
}
}
/* See what kind of a special operator we have here */
switch (oper)
{
case GT_CALL:
return GenTreeCall::Equals(op1->AsCall(), op2->AsCall());
#ifdef FEATURE_SIMD
case GT_SIMD:
return GenTreeSIMD::Equals(op1->AsSIMD(), op2->AsSIMD());
#endif // FEATURE_SIMD
#ifdef FEATURE_HW_INTRINSICS
case GT_HWINTRINSIC:
return GenTreeHWIntrinsic::Equals(op1->AsHWIntrinsic(), op2->AsHWIntrinsic());
#endif
case GT_ARR_ELEM:
if (op1->AsArrElem()->gtArrRank != op2->AsArrElem()->gtArrRank)
{
return false;
}
// NOTE: gtArrElemSize may need to be handled
unsigned dim;
for (dim = 0; dim < op1->AsArrElem()->gtArrRank; dim++)
{
if (!Compare(op1->AsArrElem()->gtArrInds[dim], op2->AsArrElem()->gtArrInds[dim]))
{
return false;
}
}
op1 = op1->AsArrElem()->gtArrObj;
op2 = op2->AsArrElem()->gtArrObj;
goto AGAIN;
case GT_ARR_OFFSET:
if (op1->AsArrOffs()->gtCurrDim != op2->AsArrOffs()->gtCurrDim ||
op1->AsArrOffs()->gtArrRank != op2->AsArrOffs()->gtArrRank)
{
return false;
}
return (Compare(op1->AsArrOffs()->gtOffset, op2->AsArrOffs()->gtOffset) &&
Compare(op1->AsArrOffs()->gtIndex, op2->AsArrOffs()->gtIndex) &&
Compare(op1->AsArrOffs()->gtArrObj, op2->AsArrOffs()->gtArrObj));
case GT_PHI:
return GenTreePhi::Equals(op1->AsPhi(), op2->AsPhi());
case GT_FIELD_LIST:
return GenTreeFieldList::Equals(op1->AsFieldList(), op2->AsFieldList());
case GT_CMPXCHG:
return Compare(op1->AsCmpXchg()->gtOpLocation, op2->AsCmpXchg()->gtOpLocation) &&
Compare(op1->AsCmpXchg()->gtOpValue, op2->AsCmpXchg()->gtOpValue) &&
Compare(op1->AsCmpXchg()->gtOpComparand, op2->AsCmpXchg()->gtOpComparand);
case GT_STORE_DYN_BLK:
return Compare(op1->AsStoreDynBlk()->Addr(), op2->AsStoreDynBlk()->Addr()) &&
Compare(op1->AsStoreDynBlk()->Data(), op2->AsStoreDynBlk()->Data()) &&
Compare(op1->AsStoreDynBlk()->gtDynamicSize, op2->AsStoreDynBlk()->gtDynamicSize);
default:
assert(!"unexpected operator");
}
return false;
}
//------------------------------------------------------------------------
// gtHasRef: Find out whether the given tree contains a local/field.
//
// Arguments:
// tree - tree to find the local in
// lclNum - the local's number, *or* the handle for the field
//
// Return Value:
// Whether "tree" has any LCL_VAR/LCL_FLD nodes that refer to the
// local, LHS or RHS, or FIELD nodes with the specified handle.
//
// Notes:
// Does not pay attention to local address nodes.
//
/* static */ bool Compiler::gtHasRef(GenTree* tree, ssize_t lclNum)
{
if (tree == nullptr)
{
return false;
}
if (tree->OperIsLeaf())
{
if (tree->OperIs(GT_LCL_VAR, GT_LCL_FLD) && (tree->AsLclVarCommon()->GetLclNum() == (unsigned)lclNum))
{
return true;
}
if (tree->OperIs(GT_RET_EXPR))
{
return gtHasRef(tree->AsRetExpr()->gtInlineCandidate, lclNum);
}
return false;
}
if (tree->OperIsUnary())
{
// Code in importation (see CEE_STFLD in impImportBlockCode), when
// spilling, can pass us "lclNum" that is actually a field handle...
if (tree->OperIs(GT_FIELD) && (lclNum == (ssize_t)tree->AsField()->gtFldHnd))
{
return true;
}
return gtHasRef(tree->AsUnOp()->gtGetOp1(), lclNum);
}
if (tree->OperIsBinary())
{
return gtHasRef(tree->AsOp()->gtGetOp1(), lclNum) || gtHasRef(tree->AsOp()->gtGetOp2(), lclNum);
}
bool result = false;
tree->VisitOperands([lclNum, &result](GenTree* operand) -> GenTree::VisitResult {
if (gtHasRef(operand, lclNum))
{
result = true;
return GenTree::VisitResult::Abort;
}
return GenTree::VisitResult::Continue;
});
return result;
}
struct AddrTakenDsc
{
Compiler* comp;
bool hasAddrTakenLcl;
};
/* static */
Compiler::fgWalkResult Compiler::gtHasLocalsWithAddrOpCB(GenTree** pTree, fgWalkData* data)
{
GenTree* tree = *pTree;
Compiler* comp = data->compiler;
if (tree->gtOper == GT_LCL_VAR)
{
const LclVarDsc* varDsc = comp->lvaGetDesc(tree->AsLclVarCommon());
if (varDsc->lvHasLdAddrOp || varDsc->IsAddressExposed())
{
((AddrTakenDsc*)data->pCallbackData)->hasAddrTakenLcl = true;
return WALK_ABORT;
}
}
return WALK_CONTINUE;
}
/*****************************************************************************
*
* Return true if this tree contains locals with lvHasLdAddrOp or IsAddressExposed()
* flag(s) set.
*/
bool Compiler::gtHasLocalsWithAddrOp(GenTree* tree)
{
AddrTakenDsc desc;
desc.comp = this;
desc.hasAddrTakenLcl = false;
fgWalkTreePre(&tree, gtHasLocalsWithAddrOpCB, &desc);
return desc.hasAddrTakenLcl;
}
#ifdef DEBUG
/*****************************************************************************
*
* Helper used to compute hash values for trees.
*/
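// Note: this is a simple mixing step; the accumulated hash is scaled by roughly 1.5x before being
// XOR-ed with the new value, so the result depends on the order in which values are added.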
inline unsigned genTreeHashAdd(unsigned old, unsigned add)
{
return (old + old / 2) ^ add;
}
inline unsigned genTreeHashAdd(unsigned old, void* add)
{
return genTreeHashAdd(old, (unsigned)(size_t)add);
}
/*****************************************************************************
*
* Given an arbitrary expression tree, compute a hash value for it.
*/
unsigned Compiler::gtHashValue(GenTree* tree)
{
genTreeOps oper;
unsigned kind;
unsigned hash = 0;
GenTree* temp;
AGAIN:
assert(tree);
/* Figure out what kind of a node we have */
oper = tree->OperGet();
kind = tree->OperKind();
/* Include the operator value in the hash */
hash = genTreeHashAdd(hash, oper);
/* Is this a leaf node? */
if (kind & GTK_LEAF)
{
size_t add;
switch (oper)
{
UINT64 bits;
case GT_LCL_VAR:
add = tree->AsLclVar()->GetLclNum();
break;
case GT_LCL_FLD:
hash = genTreeHashAdd(hash, tree->AsLclFld()->GetLclNum());
add = tree->AsLclFld()->GetLclOffs();
break;
case GT_CNS_INT:
add = tree->AsIntCon()->gtIconVal;
break;
case GT_CNS_LNG:
bits = (UINT64)tree->AsLngCon()->gtLconVal;
#ifdef HOST_64BIT
add = bits;
#else // 32-bit host
add = genTreeHashAdd(uhi32(bits), ulo32(bits));
#endif
break;
case GT_CNS_DBL:
bits = *(UINT64*)(&tree->AsDblCon()->gtDconVal);
#ifdef HOST_64BIT
add = bits;
#else // 32-bit host
add = genTreeHashAdd(uhi32(bits), ulo32(bits));
#endif
break;
case GT_CNS_STR:
add = tree->AsStrCon()->gtSconCPX;
break;
case GT_JMP:
add = tree->AsVal()->gtVal1;
break;
default:
add = 0;
break;
}
// clang-format off
// narrow 'add' into a 32-bit 'val'
unsigned val;
#ifdef HOST_64BIT
val = genTreeHashAdd(uhi32(add), ulo32(add));
#else // 32-bit host
val = add;
#endif
// clang-format on
hash = genTreeHashAdd(hash, val);
goto DONE;
}
/* Is it a 'simple' unary/binary operator? */
GenTree* op1;
if (kind & GTK_UNOP)
{
op1 = tree->AsOp()->gtOp1;
/* Special case: no sub-operand at all */
if (GenTree::IsExOp(kind))
{
// ExOp operators extend operators with extra, non-GenTree* members. In many cases,
// these should be included in the hash code.
switch (oper)
{
case GT_ARR_LENGTH:
hash += tree->AsArrLen()->ArrLenOffset();
break;
case GT_CAST:
hash ^= tree->AsCast()->gtCastType;
break;
case GT_INDEX:
hash += tree->AsIndex()->gtIndElemSize;
break;
case GT_INDEX_ADDR:
hash += tree->AsIndexAddr()->gtElemSize;
break;
case GT_ALLOCOBJ:
hash = genTreeHashAdd(hash, static_cast<unsigned>(
reinterpret_cast<uintptr_t>(tree->AsAllocObj()->gtAllocObjClsHnd)));
hash = genTreeHashAdd(hash, tree->AsAllocObj()->gtNewHelper);
break;
case GT_RUNTIMELOOKUP:
hash = genTreeHashAdd(hash, static_cast<unsigned>(
reinterpret_cast<uintptr_t>(tree->AsRuntimeLookup()->gtHnd)));
break;
case GT_BLK:
case GT_OBJ:
hash =
genTreeHashAdd(hash,
static_cast<unsigned>(reinterpret_cast<uintptr_t>(tree->AsBlk()->GetLayout())));
break;
case GT_FIELD:
hash = genTreeHashAdd(hash, tree->AsField()->gtFldHnd);
break;
// For the ones below no extra argument matters for comparison.
case GT_BOX:
break;
default:
assert(!"unexpected unary ExOp operator");
}
}
if (!op1)
{
goto DONE;
}
tree = op1;
goto AGAIN;
}
if (kind & GTK_BINOP)
{
if (GenTree::IsExOp(kind))
{
// ExOp operators extend operators with extra, non-GenTree* members. In many cases,
// these should be included in the hash code.
switch (oper)
{
case GT_INTRINSIC:
hash += tree->AsIntrinsic()->gtIntrinsicName;
break;
case GT_LEA:
hash += static_cast<unsigned>(tree->AsAddrMode()->Offset() << 3) + tree->AsAddrMode()->gtScale;
break;
case GT_BOUNDS_CHECK:
hash = genTreeHashAdd(hash, tree->AsBoundsChk()->gtThrowKind);
break;
case GT_STORE_BLK:
case GT_STORE_OBJ:
hash ^= PtrToUlong(tree->AsBlk()->GetLayout());
break;
// For the ones below no extra argument matters for comparison.
case GT_ARR_INDEX:
case GT_QMARK:
case GT_INDEX:
case GT_INDEX_ADDR:
break;
#ifdef FEATURE_SIMD
case GT_SIMD:
hash += tree->AsSIMD()->GetSIMDIntrinsicId();
hash += tree->AsSIMD()->GetSimdBaseType();
hash += tree->AsSIMD()->GetSimdSize();
break;
#endif // FEATURE_SIMD
#ifdef FEATURE_HW_INTRINSICS
case GT_HWINTRINSIC:
hash += tree->AsHWIntrinsic()->GetHWIntrinsicId();
hash += tree->AsHWIntrinsic()->GetSimdBaseType();
hash += tree->AsHWIntrinsic()->GetSimdSize();
hash += tree->AsHWIntrinsic()->GetAuxiliaryType();
hash += tree->AsHWIntrinsic()->GetOtherReg();
break;
#endif // FEATURE_HW_INTRINSICS
default:
assert(!"unexpected binary ExOp operator");
}
}
op1 = tree->AsOp()->gtOp1;
GenTree* op2 = tree->AsOp()->gtOp2;
/* Is there a second sub-operand? */
if (!op2)
{
/* Special case: no sub-operands at all */
if (!op1)
{
goto DONE;
}
/* This is a unary operator */
tree = op1;
goto AGAIN;
}
/* This is a binary operator */
unsigned hsh1 = gtHashValue(op1);
/* Add op1's hash to the running value and continue with op2 */
hash = genTreeHashAdd(hash, hsh1);
tree = op2;
goto AGAIN;
}
/* See what kind of a special operator we have here */
switch (tree->gtOper)
{
case GT_ARR_ELEM:
hash = genTreeHashAdd(hash, gtHashValue(tree->AsArrElem()->gtArrObj));
unsigned dim;
for (dim = 0; dim < tree->AsArrElem()->gtArrRank; dim++)
{
hash = genTreeHashAdd(hash, gtHashValue(tree->AsArrElem()->gtArrInds[dim]));
}
break;
case GT_ARR_OFFSET:
hash = genTreeHashAdd(hash, gtHashValue(tree->AsArrOffs()->gtOffset));
hash = genTreeHashAdd(hash, gtHashValue(tree->AsArrOffs()->gtIndex));
hash = genTreeHashAdd(hash, gtHashValue(tree->AsArrOffs()->gtArrObj));
break;
case GT_CALL:
if ((tree->AsCall()->gtCallThisArg != nullptr) && !tree->AsCall()->gtCallThisArg->GetNode()->OperIs(GT_NOP))
{
hash = genTreeHashAdd(hash, gtHashValue(tree->AsCall()->gtCallThisArg->GetNode()));
}
for (GenTreeCall::Use& use : tree->AsCall()->Args())
{
hash = genTreeHashAdd(hash, gtHashValue(use.GetNode()));
}
if (tree->AsCall()->gtCallType == CT_INDIRECT)
{
temp = tree->AsCall()->gtCallAddr;
assert(temp);
hash = genTreeHashAdd(hash, gtHashValue(temp));
}
else
{
hash = genTreeHashAdd(hash, tree->AsCall()->gtCallMethHnd);
}
for (GenTreeCall::Use& use : tree->AsCall()->LateArgs())
{
hash = genTreeHashAdd(hash, gtHashValue(use.GetNode()));
}
break;
#if defined(FEATURE_SIMD) || defined(FEATURE_HW_INTRINSICS)
#if defined(FEATURE_SIMD)
case GT_SIMD:
#endif
#if defined(FEATURE_HW_INTRINSICS)
case GT_HWINTRINSIC:
#endif
// TODO-List: rewrite with a general visitor / iterator?
for (GenTree* operand : tree->AsMultiOp()->Operands())
{
hash = genTreeHashAdd(hash, gtHashValue(operand));
}
break;
#endif // defined(FEATURE_SIMD) || defined(FEATURE_HW_INTRINSICS)
case GT_PHI:
for (GenTreePhi::Use& use : tree->AsPhi()->Uses())
{
hash = genTreeHashAdd(hash, gtHashValue(use.GetNode()));
}
break;
case GT_FIELD_LIST:
for (GenTreeFieldList::Use& use : tree->AsFieldList()->Uses())
{
hash = genTreeHashAdd(hash, gtHashValue(use.GetNode()));
}
break;
case GT_CMPXCHG:
hash = genTreeHashAdd(hash, gtHashValue(tree->AsCmpXchg()->gtOpLocation));
hash = genTreeHashAdd(hash, gtHashValue(tree->AsCmpXchg()->gtOpValue));
hash = genTreeHashAdd(hash, gtHashValue(tree->AsCmpXchg()->gtOpComparand));
break;
case GT_STORE_DYN_BLK:
hash = genTreeHashAdd(hash, gtHashValue(tree->AsStoreDynBlk()->Data()));
hash = genTreeHashAdd(hash, gtHashValue(tree->AsStoreDynBlk()->Addr()));
hash = genTreeHashAdd(hash, gtHashValue(tree->AsStoreDynBlk()->gtDynamicSize));
break;
default:
#ifdef DEBUG
gtDispTree(tree);
#endif
assert(!"unexpected operator");
break;
}
DONE:
return hash;
}
#endif // DEBUG
/*****************************************************************************
*
* Return a relational operator that is the reverse of the given one.
*/
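// For example, ReverseRelop(GT_LT) is GT_GE, since !(a < b) is equivalent to (a >= b).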
/* static */
genTreeOps GenTree::ReverseRelop(genTreeOps relop)
{
static const genTreeOps reverseOps[] = {
GT_NE, // GT_EQ
GT_EQ, // GT_NE
GT_GE, // GT_LT
GT_GT, // GT_LE
GT_LT, // GT_GE
GT_LE, // GT_GT
GT_TEST_NE, // GT_TEST_EQ
GT_TEST_EQ, // GT_TEST_NE
};
assert(reverseOps[GT_EQ - GT_EQ] == GT_NE);
assert(reverseOps[GT_NE - GT_EQ] == GT_EQ);
assert(reverseOps[GT_LT - GT_EQ] == GT_GE);
assert(reverseOps[GT_LE - GT_EQ] == GT_GT);
assert(reverseOps[GT_GE - GT_EQ] == GT_LT);
assert(reverseOps[GT_GT - GT_EQ] == GT_LE);
assert(reverseOps[GT_TEST_EQ - GT_EQ] == GT_TEST_NE);
assert(reverseOps[GT_TEST_NE - GT_EQ] == GT_TEST_EQ);
assert(OperIsCompare(relop));
assert(relop >= GT_EQ && (unsigned)(relop - GT_EQ) < sizeof(reverseOps));
return reverseOps[relop - GT_EQ];
}
/*****************************************************************************
*
* Return a relational operator that will work for swapped operands.
*/
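// For example, SwapRelop(GT_LT) is GT_GT, since (a < b) is equivalent to (b > a).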
/* static */
genTreeOps GenTree::SwapRelop(genTreeOps relop)
{
static const genTreeOps swapOps[] = {
GT_EQ, // GT_EQ
GT_NE, // GT_NE
GT_GT, // GT_LT
GT_GE, // GT_LE
GT_LE, // GT_GE
GT_LT, // GT_GT
GT_TEST_EQ, // GT_TEST_EQ
GT_TEST_NE, // GT_TEST_NE
};
assert(swapOps[GT_EQ - GT_EQ] == GT_EQ);
assert(swapOps[GT_NE - GT_EQ] == GT_NE);
assert(swapOps[GT_LT - GT_EQ] == GT_GT);
assert(swapOps[GT_LE - GT_EQ] == GT_GE);
assert(swapOps[GT_GE - GT_EQ] == GT_LE);
assert(swapOps[GT_GT - GT_EQ] == GT_LT);
assert(swapOps[GT_TEST_EQ - GT_EQ] == GT_TEST_EQ);
assert(swapOps[GT_TEST_NE - GT_EQ] == GT_TEST_NE);
assert(OperIsCompare(relop));
assert(relop >= GT_EQ && (unsigned)(relop - GT_EQ) < sizeof(swapOps));
return swapOps[relop - GT_EQ];
}
/*****************************************************************************
*
* Reverse the meaning of the given test condition.
*/
GenTree* Compiler::gtReverseCond(GenTree* tree)
{
if (tree->OperIsCompare())
{
tree->SetOper(GenTree::ReverseRelop(tree->OperGet()));
// Flip the GTF_RELOP_NAN_UN bit
// a ord b === (a != NaN && b != NaN)
// a unord b === (a == NaN || b == NaN)
// => !(a ord b) === (a unord b)
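        // For example, reversing an ordered "a < b" yields an unordered "a >= b", which also
        // evaluates to true when either operand is NaN.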
if (varTypeIsFloating(tree->AsOp()->gtOp1->TypeGet()))
{
tree->gtFlags ^= GTF_RELOP_NAN_UN;
}
}
else if (tree->OperIs(GT_JCC, GT_SETCC))
{
GenTreeCC* cc = tree->AsCC();
cc->gtCondition = GenCondition::Reverse(cc->gtCondition);
}
else if (tree->OperIs(GT_JCMP))
{
// Flip the GTF_JCMP_EQ
//
// This causes switching
// cbz <=> cbnz
// tbz <=> tbnz
tree->gtFlags ^= GTF_JCMP_EQ;
}
else
{
tree = gtNewOperNode(GT_NOT, TYP_INT, tree);
}
return tree;
}
#if !defined(TARGET_64BIT) || defined(TARGET_ARM64)
//------------------------------------------------------------------------------
// IsValidLongMul : Check for long multiplication with 32 bit operands.
//
// Recognizes the following tree: MUL(CAST(long <- int), CAST(long <- int) or CONST),
// where CONST must be an integer constant that fits in 32 bits. Will try to detect
// cases when the multiplication cannot overflow and return "true" for them.
//
// This function does not change the state of the tree and is usable in LIR.
//
// Return Value:
// Whether this GT_MUL tree is a valid long multiplication candidate.
//
bool GenTreeOp::IsValidLongMul()
{
assert(OperIs(GT_MUL));
GenTree* op1 = gtGetOp1();
GenTree* op2 = gtGetOp2();
if (!TypeIs(TYP_LONG))
{
return false;
}
assert(op1->TypeIs(TYP_LONG));
assert(op2->TypeIs(TYP_LONG));
if (!(op1->OperIs(GT_CAST) && genActualTypeIsInt(op1->AsCast()->CastOp())))
{
return false;
}
if (!(op2->OperIs(GT_CAST) && genActualTypeIsInt(op2->AsCast()->CastOp())) &&
!(op2->IsIntegralConst() && FitsIn<int32_t>(op2->AsIntConCommon()->IntegralValue())))
{
return false;
}
if (op1->gtOverflow() || op2->gtOverflowEx())
{
return false;
}
if (gtOverflow())
{
auto getMaxValue = [this](GenTree* op) -> int64_t {
if (op->OperIs(GT_CAST))
{
if (op->IsUnsigned())
{
switch (op->AsCast()->CastOp()->TypeGet())
{
case TYP_UBYTE:
return UINT8_MAX;
case TYP_USHORT:
return UINT16_MAX;
default:
return UINT32_MAX;
}
}
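// Signed (sign-extending) source: for an unsigned multiply a negative value,
// reinterpreted as unsigned, can be as large as UINT64_MAX; for a signed
// multiply the largest magnitude is INT32_MIN.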
return IsUnsigned() ? static_cast<int64_t>(UINT64_MAX) : INT32_MIN;
}
return op->AsIntConCommon()->IntegralValue();
};
int64_t maxOp1 = getMaxValue(op1);
int64_t maxOp2 = getMaxValue(op2);
if (CheckedOps::MulOverflows(maxOp1, maxOp2, IsUnsigned()))
{
return false;
}
}
// Both operands must extend the same way.
bool op1ZeroExtends = op1->IsUnsigned();
bool op2ZeroExtends = op2->OperIs(GT_CAST) ? op2->IsUnsigned() : op2->AsIntConCommon()->IntegralValue() >= 0;
bool op2AnyExtensionIsSuitable = op2->IsIntegralConst() && op2ZeroExtends;
if ((op1ZeroExtends != op2ZeroExtends) && !op2AnyExtensionIsSuitable)
{
return false;
}
return true;
}
#if !defined(TARGET_64BIT) && defined(DEBUG)
//------------------------------------------------------------------------------
// DebugCheckLongMul : Checks that a GTF_MUL_64RSLT tree is a valid MUL_LONG.
//
// Notes:
// This function is defined for 32 bit targets only because we *must* maintain
// the MUL_LONG-compatible tree shape throughout the compilation from morph to
// decomposition, since we do not have (great) ability to create new calls in LIR.
//
// It is for this reason that we recognize MUL_LONGs early in morph, mark them with
// a flag and then pessimize various places (e.g. assertion propagation) to not look
// at them. In contrast, on ARM64 we recognize MUL_LONGs late, in lowering, and thus
// do not need this function.
//
void GenTreeOp::DebugCheckLongMul()
{
assert(OperIs(GT_MUL));
assert(Is64RsltMul());
assert(TypeIs(TYP_LONG));
assert(!gtOverflow());
GenTree* op1 = gtGetOp1();
GenTree* op2 = gtGetOp2();
assert(op1->TypeIs(TYP_LONG));
assert(op2->TypeIs(TYP_LONG));
// op1 has to be CAST(long <- int)
assert(op1->OperIs(GT_CAST) && genActualTypeIsInt(op1->AsCast()->CastOp()));
assert(!op1->gtOverflow());
// op2 has to be CAST(long <- int) or a suitably small constant.
assert((op2->OperIs(GT_CAST) && genActualTypeIsInt(op2->AsCast()->CastOp())) ||
(op2->IsIntegralConst() && FitsIn<int32_t>(op2->AsIntConCommon()->IntegralValue())));
assert(!op2->gtOverflowEx());
// Both operands must extend the same way.
bool op1ZeroExtends = op1->IsUnsigned();
bool op2ZeroExtends = op2->OperIs(GT_CAST) ? op2->IsUnsigned() : op2->AsIntConCommon()->IntegralValue() >= 0;
bool op2AnyExtensionIsSuitable = op2->IsIntegralConst() && op2ZeroExtends;
assert((op1ZeroExtends == op2ZeroExtends) || op2AnyExtensionIsSuitable);
// Do unsigned mul iff both operands are zero-extending.
assert(op1->IsUnsigned() == IsUnsigned());
}
#endif // !defined(TARGET_64BIT) && defined(DEBUG)
#endif // !defined(TARGET_64BIT) || defined(TARGET_ARM64)
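//------------------------------------------------------------------------
// gtSetCallArgsOrder: Set the evaluation order and costs for a call's argument list.
//
// Arguments:
//    args       - The (early or late) argument list
//    lateArgs   - Whether "args" is the late argument list
//    callCostEx - [in, out] the call's accumulated execution cost
//    callCostSz - [in, out] the call's accumulated size cost
//
// Return Value:
//    The maximum Sethi "complexity" among the argument trees.
//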
unsigned Compiler::gtSetCallArgsOrder(const GenTreeCall::UseList& args, bool lateArgs, int* callCostEx, int* callCostSz)
{
unsigned level = 0;
unsigned costEx = 0;
unsigned costSz = 0;
for (GenTreeCall::Use& use : args)
{
GenTree* argNode = use.GetNode();
unsigned argLevel = gtSetEvalOrder(argNode);
if (argLevel > level)
{
level = argLevel;
}
if (argNode->GetCostEx() != 0)
{
costEx += argNode->GetCostEx();
costEx += lateArgs ? 0 : IND_COST_EX;
}
if (argNode->GetCostSz() != 0)
{
costSz += argNode->GetCostSz();
#ifdef TARGET_XARCH
if (lateArgs) // push is smaller than mov to reg
#endif
{
costSz += 1;
}
}
}
*callCostEx += costEx;
*callCostSz += costSz;
return level;
}
#if defined(FEATURE_SIMD) || defined(FEATURE_HW_INTRINSICS)
//------------------------------------------------------------------------
// gtSetMultiOpOrder: Calculate the costs for a MultiOp.
//
// Currently this function just preserves the previous behavior.
// TODO-List-Cleanup: implement proper costing for these trees.
//
// Arguments:
// multiOp - The MultiOp tree in question
//
// Return Value:
// The Sethi "complexity" for this tree (the idealized number of
// registers needed to evaluate it).
//
unsigned Compiler::gtSetMultiOpOrder(GenTreeMultiOp* multiOp)
{
// These default costs preserve previous behavior.
// TODO-CQ: investigate opportunities for tuning them.
int costEx = 1;
int costSz = 1;
unsigned level = 0;
unsigned lvl2 = 0;
#if defined(FEATURE_HW_INTRINSICS)
if (multiOp->OperIs(GT_HWINTRINSIC))
{
GenTreeHWIntrinsic* hwTree = multiOp->AsHWIntrinsic();
#if defined(TARGET_XARCH)
if ((hwTree->GetOperandCount() == 1) && hwTree->OperIsMemoryLoadOrStore())
{
costEx = IND_COST_EX;
costSz = 2;
GenTree* const addrNode = hwTree->Op(1);
level = gtSetEvalOrder(addrNode);
GenTree* const addr = addrNode->gtEffectiveVal();
// See if we can form a complex addressing mode.
if (addr->OperIs(GT_ADD) && gtMarkAddrMode(addr, &costEx, &costSz, hwTree->TypeGet()))
{
// Nothing to do, costs have been set.
}
else
{
costEx += addr->GetCostEx();
costSz += addr->GetCostSz();
}
hwTree->SetCosts(costEx, costSz);
return level;
}
#endif
switch (hwTree->GetHWIntrinsicId())
{
#if defined(TARGET_XARCH)
case NI_Vector128_Create:
case NI_Vector256_Create:
#elif defined(TARGET_ARM64)
case NI_Vector64_Create:
case NI_Vector128_Create:
#endif
{
if ((hwTree->GetOperandCount() == 1) && hwTree->Op(1)->OperIsConst())
{
// Vector.Create(cns) is cheap, but not so cheap as to be (1,1)
costEx = IND_COST_EX;
costSz = 2;
level = gtSetEvalOrder(hwTree->Op(1));
hwTree->SetCosts(costEx, costSz);
return level;
}
break;
}
default:
break;
}
}
#endif // defined(FEATURE_SIMD) || defined(FEATURE_HW_INTRINSICS)
// This code is here to preserve previous behavior.
switch (multiOp->GetOperandCount())
{
case 0:
// This is a constant HWIntrinsic, we already have correct costs.
break;
case 1:
// A "unary" case.
level = gtSetEvalOrder(multiOp->Op(1));
costEx += multiOp->Op(1)->GetCostEx();
costSz += multiOp->Op(1)->GetCostSz();
break;
case 2:
// A "binary" case.
// This way "level" is the complexity of the first tree to be
// evaluated, and "lvl2" that of the second.
if (multiOp->IsReverseOp())
{
level = gtSetEvalOrder(multiOp->Op(2));
lvl2 = gtSetEvalOrder(multiOp->Op(1));
}
else
{
level = gtSetEvalOrder(multiOp->Op(1));
lvl2 = gtSetEvalOrder(multiOp->Op(2));
}
// We want the more complex tree to be evaluated first.
if (level < lvl2)
{
bool canSwap = multiOp->IsReverseOp() ? gtCanSwapOrder(multiOp->Op(2), multiOp->Op(1))
: gtCanSwapOrder(multiOp->Op(1), multiOp->Op(2));
if (canSwap)
{
if (multiOp->IsReverseOp())
{
multiOp->ClearReverseOp();
}
else
{
multiOp->SetReverseOp();
}
std::swap(level, lvl2);
}
}
if (level < 1)
{
level = lvl2;
}
else if (level == lvl2)
{
level += 1;
}
costEx += (multiOp->Op(1)->GetCostEx() + multiOp->Op(2)->GetCostEx());
costSz += (multiOp->Op(1)->GetCostSz() + multiOp->Op(2)->GetCostSz());
break;
default:
// The former "ArgList" case... we'll be emulating it here.
// The old implementation pushed the nodes on the list, in pre-order.
// Then it popped and costed them in "reverse order", so that's what
// we'll be doing here as well.
unsigned nxtlvl = 0;
for (size_t i = multiOp->GetOperandCount(); i >= 1; i--)
{
GenTree* op = multiOp->Op(i);
unsigned lvl = gtSetEvalOrder(op);
if (lvl < 1)
{
level = nxtlvl;
}
else if (lvl == nxtlvl)
{
level = lvl + 1;
}
else
{
level = lvl;
}
costEx += op->GetCostEx();
costSz += op->GetCostSz();
// Preserving previous behavior...
CLANG_FORMAT_COMMENT_ANCHOR;
#ifndef TARGET_XARCH
if (op->GetCostSz() != 0)
{
costSz += 1;
}
#endif
nxtlvl = level;
}
break;
}
multiOp->SetCosts(costEx, costSz);
return level;
}
#endif
//-----------------------------------------------------------------------------
// gtWalkOp: Traverse and mark an address expression
//
// Arguments:
// op1WB - An out parameter which is either the address expression, or one
// of its operands.
// op2WB - An out parameter which starts as either null or one of the operands
// of the address expression.
// base - The base address of the addressing mode, or null if 'constOnly' is false
// constOnly - True if we will only traverse into ADDs with constant op2.
//
// This routine is a helper routine for gtSetEvalOrder() and is used to identify the
// base and index nodes, which will be validated against those identified by
// genCreateAddrMode().
// It also marks the ADD nodes involved in the address expression with the
// GTF_ADDRMODE_NO_CSE flag which prevents them from being considered for CSE's.
//
// Its two output parameters are modified under the following conditions:
//
// It is called once with the original address expression as 'op1WB', and
// with 'constOnly' set to false. On this first invocation, *op1WB is always
// an ADD node, and it will consider the operands of the ADD even if its op2 is
// not a constant. However, when it encounters a non-constant or the base in the
// op2 position, it stops iterating. That operand is returned in the 'op2WB' out
// parameter, and will be considered on the third invocation of this method if
// it is an ADD.
//
// It is called the second time with the two operands of the original expression, in
// the original order, and the third time in reverse order. For these invocations
// 'constOnly' is true, so it will only traverse cascaded ADD nodes if they have a
// constant op2.
//
// The result, after three invocations, is that the values of the two out parameters
// correspond to the base and index in some fashion. This method doesn't attempt
// to determine or validate the scale or offset, if any.
//
// Assumptions (presumed to be ensured by genCreateAddrMode()):
// If an ADD has a constant operand, it is in the op2 position.
//
// Notes:
// This method, and its invocation sequence, are quite confusing, and since they
// were not originally well-documented, this specification is a possibly-imperfect
// reconstruction.
// The motivation for the handling of the NOP case is unclear.
// Note that 'op2WB' is only modified in the initial (!constOnly) case,
// or if a NOP is encountered in the op1 position.
//
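// The invocation sequence used by gtMarkAddrMode looks like:
//   gtWalkOp(&op1, &op2, base, false);   // walk the whole address expression
//   gtWalkOp(&op1, &op2, nullptr, true); // XARCH only: walk op1, constant op2's only
//   gtWalkOp(&op2, &op1, nullptr, true); // XARCH only: walk op2, constant op2's only
//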
void Compiler::gtWalkOp(GenTree** op1WB, GenTree** op2WB, GenTree* base, bool constOnly)
{
GenTree* op1 = *op1WB;
GenTree* op2 = *op2WB;
op1 = op1->gtEffectiveVal();
// Now we look for op1's with non-overflow GT_ADDs [of constants]
while ((op1->gtOper == GT_ADD) && (!op1->gtOverflow()) && (!constOnly || (op1->AsOp()->gtOp2->IsCnsIntOrI())))
{
// mark it with GTF_ADDRMODE_NO_CSE
op1->gtFlags |= GTF_ADDRMODE_NO_CSE;
if (!constOnly)
{
op2 = op1->AsOp()->gtOp2;
}
op1 = op1->AsOp()->gtOp1;
// If op1 is a GT_NOP then swap op1 and op2.
// (Why? Also, presumably op2 is not a GT_NOP in this case?)
if (op1->gtOper == GT_NOP)
{
GenTree* tmp;
tmp = op1;
op1 = op2;
op2 = tmp;
}
if (!constOnly && ((op2 == base) || (!op2->IsCnsIntOrI())))
{
break;
}
op1 = op1->gtEffectiveVal();
}
*op1WB = op1;
*op2WB = op2;
}
#ifdef DEBUG
/*****************************************************************************
* This is a workaround. It is to help implement an assert in gtSetEvalOrder() that the values
* gtWalkOp() leaves in op1 and op2 correspond with the values of adr, idx, mul, and cns
* that are returned by genCreateAddrMode(). It's essentially impossible to determine
* what gtWalkOp() *should* return for all possible trees. This simply loosens one assert
* to handle the following case:
indir int
const(h) int 4 field
+ byref
lclVar byref V00 this <-- op2
comma byref <-- adr (base)
indir byte
lclVar byref V00 this
+ byref
const int 2 <-- mul == 4
<< int <-- op1
lclVar int V01 arg1 <-- idx
* Here, we are planning to generate the address mode [edx+4*eax], where eax = idx and edx = the GT_COMMA expression.
* To check adr equivalence with op2, we need to walk down the GT_ADD tree just like gtWalkOp() does.
*/
GenTree* Compiler::gtWalkOpEffectiveVal(GenTree* op)
{
for (;;)
{
op = op->gtEffectiveVal();
if ((op->gtOper != GT_ADD) || op->gtOverflow() || !op->AsOp()->gtOp2->IsCnsIntOrI())
{
break;
}
op = op->AsOp()->gtOp1;
}
return op;
}
#endif // DEBUG
/*****************************************************************************
*
* Given a tree, set the GetCostEx and GetCostSz() fields which
* are used to measure the relative costs of the codegen of the tree
*
*/
void Compiler::gtPrepareCost(GenTree* tree)
{
gtSetEvalOrder(tree);
}
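//------------------------------------------------------------------------
// gtIsLikelyRegVar: Heuristically determine whether a local is likely to be enregistered.
//
// Arguments:
//    tree - The node in question
//
// Return Value:
//    True if "tree" is a GT_LCL_VAR that is not marked "do not enregister",
//    is not an EH-live definition, has a sufficiently high weighted ref count,
//    and (on x86) is not a floating point or long local.
//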
bool Compiler::gtIsLikelyRegVar(GenTree* tree)
{
if (tree->gtOper != GT_LCL_VAR)
{
return false;
}
const LclVarDsc* varDsc = lvaGetDesc(tree->AsLclVar());
if (varDsc->lvDoNotEnregister)
{
return false;
}
// If this is an EH-live var, return false if it is a def,
// as it will have to go to memory.
if (varDsc->lvLiveInOutOfHndlr && ((tree->gtFlags & GTF_VAR_DEF) != 0))
{
return false;
}
// Be pessimistic if ref counts are not yet set up.
//
// Perhaps we should be optimistic though.
// See notes in GitHub issue 18969.
if (!lvaLocalVarRefCounted())
{
return false;
}
if (varDsc->lvRefCntWtd() < (BB_UNITY_WEIGHT * 3))
{
return false;
}
#ifdef TARGET_X86
if (varTypeUsesFloatReg(tree->TypeGet()))
return false;
if (varTypeIsLong(tree->TypeGet()))
return false;
#endif
return true;
}
//------------------------------------------------------------------------
// gtCanSwapOrder: Returns true iff the secondNode can be swapped with firstNode.
//
// Arguments:
// firstNode - An operand of a tree that can have GTF_REVERSE_OPS set.
// secondNode - The other operand of the tree.
//
// Return Value:
// Returns a boolean indicating whether it is safe to reverse the execution
// order of the two trees, considering any exception, global effects, or
// ordering constraints.
//
bool Compiler::gtCanSwapOrder(GenTree* firstNode, GenTree* secondNode)
{
// The relative order of global / side effects can't be swapped.
bool canSwap = true;
if (optValnumCSE_phase)
{
canSwap = optCSE_canSwap(firstNode, secondNode);
}
// We cannot swap in the presence of special side effects such as GT_CATCH_ARG.
if (canSwap && (firstNode->gtFlags & GTF_ORDER_SIDEEFF))
{
canSwap = false;
}
// When strict side effect order is disabled we allow GTF_REVERSE_OPS to be set
// when one or both sides contain a GTF_CALL or GTF_EXCEPT.
// Currently only the C and C++ languages allow non-strict side effect order.
unsigned strictEffects = GTF_GLOB_EFFECT;
if (canSwap && (firstNode->gtFlags & strictEffects))
{
// op1 has side effects that can't be reordered.
// Check for some special cases where we still may be able to swap.
if (secondNode->gtFlags & strictEffects)
{
// op2 also has non-reorderable side effects - can't swap.
canSwap = false;
}
else
{
// No side effects in op2 - we can swap iff op1 has no way of modifying op2,
// i.e. through byref assignments or calls, or if op2 is invariant (e.g. a constant).
if (firstNode->gtFlags & strictEffects & GTF_PERSISTENT_SIDE_EFFECTS)
{
// We have to be conservative - can swap iff op2 is constant.
if (!secondNode->IsInvariant())
{
canSwap = false;
}
}
}
}
return canSwap;
}
//------------------------------------------------------------------------
// Given an address expression, compute its costs and addressing mode opportunities,
// and mark addressing mode candidates as GTF_DONT_CSE.
//
// Arguments:
// addr - The address expression
// pCostEx - The execution cost of this address expression (in/out arg to be updated)
// pCostSz - The size cost of this address expression (in/out arg to be updated)
// type - The type of the value being referenced by the parent of this address expression.
//
// Return Value:
// Returns true if it finds an addressing mode.
//
// Notes:
// TODO-Throughput - Consider actually instantiating these early, to avoid
// having to re-run the algorithm that looks for them (might also improve CQ).
//
bool Compiler::gtMarkAddrMode(GenTree* addr, int* pCostEx, int* pCostSz, var_types type)
{
// These are "out" parameters on the call to genCreateAddrMode():
bool rev; // This will be true if the operands will need to be reversed. At this point we
// don't care about this because we're not yet instantiating this addressing mode.
unsigned mul; // This is the index (scale) value for the addressing mode
ssize_t cns; // This is the constant offset
GenTree* base; // This is the base of the address.
GenTree* idx; // This is the index.
if (codeGen->genCreateAddrMode(addr, false /*fold*/, &rev, &base, &idx, &mul, &cns))
{
#ifdef TARGET_ARMARCH
// Multiplier should be a "natural-scale" power of two number which is equal to target's width.
//
// *(ulong*)(data + index * 8); - can be optimized
// *(ulong*)(data + index * 7); - cannot be optimized
// *(int*)(data + index * 2); - cannot be optimized
//
if ((mul > 0) && (genTypeSize(type) != mul))
{
return false;
}
#endif
// We can form a complex addressing mode, so mark each of the interior
// nodes with GTF_ADDRMODE_NO_CSE and calculate a more accurate cost.
addr->gtFlags |= GTF_ADDRMODE_NO_CSE;
#ifdef TARGET_XARCH
// addrmodeCount is the count of items that we used to form
// an addressing mode. The maximum value is 4 when we have
// all of these: { base, idx, cns, mul }
//
unsigned addrmodeCount = 0;
if (base)
{
*pCostEx += base->GetCostEx();
*pCostSz += base->GetCostSz();
addrmodeCount++;
}
if (idx)
{
*pCostEx += idx->GetCostEx();
*pCostSz += idx->GetCostSz();
addrmodeCount++;
}
if (cns)
{
if (((signed char)cns) == ((int)cns))
{
*pCostSz += 1;
}
else
{
*pCostSz += 4;
}
addrmodeCount++;
}
if (mul)
{
addrmodeCount++;
}
// When we form a complex addressing mode we can reduce the costs
// associated with the interior GT_ADD and GT_LSH nodes:
//
// GT_ADD -- reduce this interior GT_ADD by (-3,-3)
// / \ --
// GT_ADD 'cns' -- reduce this interior GT_ADD by (-2,-2)
// / \ --
// 'base' GT_LSH -- reduce this interior GT_LSH by (-1,-1)
// / \ --
// 'idx' 'mul'
//
if (addrmodeCount > 1)
{
// The number of interior GT_ADD and GT_LSH nodes will always be one less than addrmodeCount
//
addrmodeCount--;
GenTree* tmp = addr;
while (addrmodeCount > 0)
{
// decrement the gtCosts for the interior GT_ADD or GT_LSH node by the remaining
// addrmodeCount
tmp->SetCosts(tmp->GetCostEx() - addrmodeCount, tmp->GetCostSz() - addrmodeCount);
addrmodeCount--;
if (addrmodeCount > 0)
{
GenTree* tmpOp1 = tmp->AsOp()->gtOp1;
GenTree* tmpOp2 = tmp->gtGetOp2();
assert(tmpOp2 != nullptr);
if ((tmpOp1 != base) && (tmpOp1->OperGet() == GT_ADD))
{
tmp = tmpOp1;
}
else if (tmpOp2->OperGet() == GT_LSH)
{
tmp = tmpOp2;
}
else if (tmpOp1->OperGet() == GT_LSH)
{
tmp = tmpOp1;
}
else if (tmpOp2->OperGet() == GT_ADD)
{
tmp = tmpOp2;
}
else
{
// We can very rarely encounter a tree that has a GT_COMMA node
// that is difficult to walk, so we just early out without decrementing.
addrmodeCount = 0;
}
}
}
}
#elif defined TARGET_ARM
if (base)
{
*pCostEx += base->GetCostEx();
*pCostSz += base->GetCostSz();
if ((base->gtOper == GT_LCL_VAR) && ((idx == NULL) || (cns == 0)))
{
*pCostSz -= 1;
}
}
if (idx)
{
*pCostEx += idx->GetCostEx();
*pCostSz += idx->GetCostSz();
if (mul > 0)
{
*pCostSz += 2;
}
}
if (cns)
{
if (cns >= 128) // small offsets fit into a 16-bit instruction
{
if (cns < 4096) // medium offsets require a 32-bit instruction
{
if (!varTypeIsFloating(type))
{
*pCostSz += 2;
}
}
else
{
*pCostEx += 2; // Very large offsets require movw/movt instructions
*pCostSz += 8;
}
}
}
#elif defined TARGET_ARM64
if (base)
{
*pCostEx += base->GetCostEx();
*pCostSz += base->GetCostSz();
}
if (idx)
{
*pCostEx += idx->GetCostEx();
*pCostSz += idx->GetCostSz();
}
if (cns != 0)
{
if (cns >= (4096 * genTypeSize(type)))
{
*pCostEx += 1;
*pCostSz += 4;
}
}
#else
#error "Unknown TARGET"
#endif
assert(addr->gtOper == GT_ADD);
assert(!addr->gtOverflow());
assert(mul != 1);
// If we have an addressing mode, we have one of:
// [base + cns]
// [ idx * mul ] // mul >= 2, else we would use base instead of idx
// [ idx * mul + cns] // mul >= 2, else we would use base instead of idx
// [base + idx * mul ] // mul can be 0, 2, 4, or 8
// [base + idx * mul + cns] // mul can be 0, 2, 4, or 8
// Note that mul == 0 is semantically equivalent to mul == 1.
// Note that cns can be zero.
CLANG_FORMAT_COMMENT_ANCHOR;
assert((base != nullptr) || (idx != nullptr && mul >= 2));
INDEBUG(GenTree* op1Save = addr);
// Walk 'addr' identifying non-overflow ADDs that will be part of the address mode.
// Note that we will be modifying 'op1' and 'op2' so that eventually they should
// map to the base and index.
GenTree* op1 = addr;
GenTree* op2 = nullptr;
gtWalkOp(&op1, &op2, base, false);
// op1 and op2 are now descendants of the root GT_ADD of the addressing mode.
assert(op1 != op1Save);
assert(op2 != nullptr);
#if defined(TARGET_XARCH)
// Walk the operands again (the third operand is unused in this case).
// This time we will only consider adds with constant op2's, since
// we have already found either a non-ADD op1 or a non-constant op2.
// NOTE: we don't support ADD(op1, cns) addressing for ARM/ARM64 yet so
// this walk makes no sense there.
gtWalkOp(&op1, &op2, nullptr, true);
// For XARCH we will fold GT_ADDs in the op2 position into the addressing mode, so we call
// gtWalkOp on both operands of the original GT_ADD.
// This is not done for ARMARCH. Though the stated reason is that we don't try to create a
// scaled index, in fact we actually do create them (even base + index*scale + offset).
// At this point, 'op2' may itself be an ADD of a constant that should be folded
// into the addressing mode.
// Walk op2 looking for non-overflow GT_ADDs of constants.
gtWalkOp(&op2, &op1, nullptr, true);
#endif // defined(TARGET_XARCH)
// OK we are done walking the tree
// Now assert that op1 and op2 correspond with base and idx
// in one of the several acceptable ways.
// Note that sometimes op1/op2 is equal to idx/base
// and other times op1/op2 is a GT_COMMA node with
// an effective value that is idx/base
if (mul > 1)
{
if ((op1 != base) && (op1->gtOper == GT_LSH))
{
op1->gtFlags |= GTF_ADDRMODE_NO_CSE;
if (op1->AsOp()->gtOp1->gtOper == GT_MUL)
{
op1->AsOp()->gtOp1->gtFlags |= GTF_ADDRMODE_NO_CSE;
}
assert((base == nullptr) || (op2 == base) || (op2->gtEffectiveVal() == base->gtEffectiveVal()) ||
(gtWalkOpEffectiveVal(op2) == gtWalkOpEffectiveVal(base)));
}
else
{
assert(op2 != nullptr);
assert(op2->OperIs(GT_LSH, GT_MUL));
op2->gtFlags |= GTF_ADDRMODE_NO_CSE;
// We may have eliminated multiple shifts and multiplies in the addressing mode,
// so navigate down through them to get to "idx".
GenTree* op2op1 = op2->AsOp()->gtOp1;
while ((op2op1->gtOper == GT_LSH || op2op1->gtOper == GT_MUL) && op2op1 != idx)
{
op2op1->gtFlags |= GTF_ADDRMODE_NO_CSE;
op2op1 = op2op1->AsOp()->gtOp1;
}
assert(op1->gtEffectiveVal() == base);
assert(op2op1 == idx);
}
}
else
{
assert(mul == 0);
if ((op1 == idx) || (op1->gtEffectiveVal() == idx))
{
if (idx != nullptr)
{
if ((op1->gtOper == GT_MUL) || (op1->gtOper == GT_LSH))
{
GenTree* op1op1 = op1->AsOp()->gtOp1;
if ((op1op1->gtOper == GT_NOP) ||
(op1op1->gtOper == GT_MUL && op1op1->AsOp()->gtOp1->gtOper == GT_NOP))
{
op1->gtFlags |= GTF_ADDRMODE_NO_CSE;
if (op1op1->gtOper == GT_MUL)
{
op1op1->gtFlags |= GTF_ADDRMODE_NO_CSE;
}
}
}
}
assert((op2 == base) || (op2->gtEffectiveVal() == base));
}
else if ((op1 == base) || (op1->gtEffectiveVal() == base))
{
if (idx != nullptr)
{
assert(op2 != nullptr);
if (op2->OperIs(GT_MUL, GT_LSH))
{
GenTree* op2op1 = op2->AsOp()->gtOp1;
if ((op2op1->gtOper == GT_NOP) ||
(op2op1->gtOper == GT_MUL && op2op1->AsOp()->gtOp1->gtOper == GT_NOP))
{
op2->gtFlags |= GTF_ADDRMODE_NO_CSE;
if (op2op1->gtOper == GT_MUL)
{
op2op1->gtFlags |= GTF_ADDRMODE_NO_CSE;
}
}
}
assert((op2 == idx) || (op2->gtEffectiveVal() == idx));
}
}
else
{
// op1 isn't base or idx. Is this possible? Or should there be an assert?
}
}
return true;
} // end if (genCreateAddrMode(...))
return false;
}
/*****************************************************************************
*
* Given a tree, figure out the order in which its sub-operands should be
* evaluated. If the second operand of a binary operator is more expensive
* than the first operand, then try to swap the operand trees. Updates the
* GTF_REVERSE_OPS bit if necessary in this case.
*
* Returns the Sethi 'complexity' estimate for this tree (the higher
* the number, the higher is the tree's resources requirement).
*
* This function sets:
* 1. GetCostEx() to the execution complexity estimate
* 2. GetCostSz() to the code size estimate
* 3. Sometimes sets GTF_ADDRMODE_NO_CSE on nodes in the tree.
* 4. DEBUG-only: clears GTF_DEBUG_NODE_MORPHED.
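*
* For example, a binary node whose two operands each have level 1 (e.g. two
* likely-enregistered locals) gets level 2, since both operand values must be
* live at the same time.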
*/
#ifdef _PREFAST_
#pragma warning(push)
#pragma warning(disable : 21000) // Suppress PREFast warning about overly large function
#endif
unsigned Compiler::gtSetEvalOrder(GenTree* tree)
{
assert(tree);
#ifdef DEBUG
/* Clear the GTF_DEBUG_NODE_MORPHED flag as well */
tree->gtDebugFlags &= ~GTF_DEBUG_NODE_MORPHED;
#endif
/* Is this a FP value? */
bool isflt = varTypeIsFloating(tree->TypeGet());
/* Figure out what kind of a node we have */
const genTreeOps oper = tree->OperGet();
const unsigned kind = tree->OperKind();
/* Assume no fixed registers will be trashed */
unsigned level;
int costEx;
int costSz;
#ifdef DEBUG
costEx = -1;
costSz = -1;
#endif
/* Is this a leaf node? */
if (kind & GTK_LEAF)
{
switch (oper)
{
#ifdef TARGET_ARM
case GT_CNS_STR:
// Uses movw/movt
costSz = 8;
costEx = 2;
goto COMMON_CNS;
case GT_CNS_LNG:
{
GenTreeIntConCommon* con = tree->AsIntConCommon();
INT64 lngVal = con->LngValue();
INT32 loVal = (INT32)(lngVal & 0xffffffff);
INT32 hiVal = (INT32)(lngVal >> 32);
if (lngVal == 0)
{
costSz = 1;
costEx = 1;
}
else
{
// Minimum of one instruction to setup hiVal,
// and one instruction to setup loVal
costSz = 4 + 4;
costEx = 1 + 1;
if (!codeGen->validImmForInstr(INS_mov, (target_ssize_t)hiVal) &&
!codeGen->validImmForInstr(INS_mvn, (target_ssize_t)hiVal))
{
// Needs extra instruction: movw/movt
costSz += 4;
costEx += 1;
}
if (!codeGen->validImmForInstr(INS_mov, (target_ssize_t)loVal) &&
!codeGen->validImmForInstr(INS_mvn, (target_ssize_t)loVal))
{
// Needs extra instruction: movw/movt
costSz += 4;
costEx += 1;
}
}
goto COMMON_CNS;
}
case GT_CNS_INT:
{
// If the constant is a handle then it will need to have a relocation
// applied to it.
// Any constant that requires a reloc must use the movw/movt sequence
//
GenTreeIntConCommon* con = tree->AsIntConCommon();
target_ssize_t conVal = (target_ssize_t)con->IconValue();
if (con->ImmedValNeedsReloc(this))
{
// Requires movw/movt
costSz = 8;
costEx = 2;
}
else if (codeGen->validImmForInstr(INS_add, conVal))
{
// Typically included with parent oper
costSz = 2;
costEx = 1;
}
else if (codeGen->validImmForInstr(INS_mov, conVal) || codeGen->validImmForInstr(INS_mvn, conVal))
{
// Uses mov or mvn
costSz = 4;
costEx = 1;
}
else
{
// Needs movw/movt
costSz = 8;
costEx = 2;
}
goto COMMON_CNS;
}
#elif defined TARGET_XARCH
case GT_CNS_STR:
#ifdef TARGET_AMD64
costSz = 10;
costEx = 2;
#else // TARGET_X86
costSz = 4;
costEx = 1;
#endif
goto COMMON_CNS;
case GT_CNS_LNG:
case GT_CNS_INT:
{
GenTreeIntConCommon* con = tree->AsIntConCommon();
ssize_t conVal = (oper == GT_CNS_LNG) ? (ssize_t)con->LngValue() : con->IconValue();
bool fitsInVal = true;
#ifdef TARGET_X86
if (oper == GT_CNS_LNG)
{
INT64 lngVal = con->LngValue();
conVal = (ssize_t)lngVal; // truncate to 32-bits
fitsInVal = ((INT64)conVal == lngVal);
}
#endif // TARGET_X86
// If the constant is a handle then it will need to have a relocation
// applied to it.
//
bool iconNeedsReloc = con->ImmedValNeedsReloc(this);
if (iconNeedsReloc)
{
costSz = 4;
costEx = 1;
}
else if (fitsInVal && GenTreeIntConCommon::FitsInI8(conVal))
{
costSz = 1;
costEx = 1;
}
#ifdef TARGET_AMD64
else if (!GenTreeIntConCommon::FitsInI32(conVal))
{
costSz = 10;
costEx = 2;
}
#endif // TARGET_AMD64
else
{
costSz = 4;
costEx = 1;
}
#ifdef TARGET_X86
if (oper == GT_CNS_LNG)
{
costSz += fitsInVal ? 1 : 4;
costEx += 1;
}
#endif // TARGET_X86
goto COMMON_CNS;
}
#elif defined(TARGET_ARM64)
case GT_CNS_STR:
case GT_CNS_LNG:
case GT_CNS_INT:
{
GenTreeIntConCommon* con = tree->AsIntConCommon();
bool iconNeedsReloc = con->ImmedValNeedsReloc(this);
INT64 imm = con->LngValue();
emitAttr size = EA_SIZE(emitActualTypeSize(tree));
if (iconNeedsReloc)
{
costSz = 8;
costEx = 2;
}
else if (emitter::emitIns_valid_imm_for_add(imm, size))
{
costSz = 2;
costEx = 1;
}
else if (emitter::emitIns_valid_imm_for_mov(imm, size))
{
costSz = 4;
costEx = 1;
}
else
{
// Arm64 allows any arbitrary 16-bit constant to be loaded into a register halfword
// There are three forms
// movk which loads into any halfword preserving the remaining halfwords
// movz which loads into any halfword zeroing the remaining halfwords
// movn which loads into any halfword, zeroing the remaining halfwords and then
// bitwise inverting the register
// In some cases it is preferable to use movn, because it has the side effect of
// filling the other halfwords with ones
// Determine whether movn or movz will require the fewest instructions to populate the immediate
bool preferMovz = false;
bool preferMovn = false;
int instructionCount = 4;
for (int i = (size == EA_8BYTE) ? 48 : 16; i >= 0; i -= 16)
{
if (!preferMovn && (uint16_t(imm >> i) == 0x0000))
{
preferMovz = true; // by using a movz to start we can save one instruction
instructionCount--;
}
else if (!preferMovz && (uint16_t(imm >> i) == 0xffff))
{
preferMovn = true; // by using a movn to start we can save one instruction
instructionCount--;
}
}
costEx = instructionCount;
costSz = 4 * instructionCount;
}
}
goto COMMON_CNS;
#else
case GT_CNS_STR:
case GT_CNS_LNG:
case GT_CNS_INT:
#error "Unknown TARGET"
#endif
COMMON_CNS:
/*
Note that some code below depends on constants always getting
moved to be the second operand of a binary operator. This is
easily accomplished by giving constants a level of 0, which
we do on the next line. If you ever decide to change this, be
aware that unless you make other arrangements for integer
constants to be moved, stuff will break.
*/
level = 0;
break;
case GT_CNS_DBL:
{
level = 0;
#if defined(TARGET_XARCH)
/* We use fldz and fld1 to load 0.0 and 1.0, but all other */
/* floating point constants are loaded using an indirection */
if ((*((__int64*)&(tree->AsDblCon()->gtDconVal)) == 0) ||
(*((__int64*)&(tree->AsDblCon()->gtDconVal)) == I64(0x3ff0000000000000)))
{
costEx = 1;
costSz = 1;
}
else
{
costEx = IND_COST_EX;
costSz = 4;
}
#elif defined(TARGET_ARM)
var_types targetType = tree->TypeGet();
if (targetType == TYP_FLOAT)
{
costEx = 1 + 2;
costSz = 2 + 4;
}
else
{
assert(targetType == TYP_DOUBLE);
costEx = 1 + 4;
costSz = 2 + 8;
}
#elif defined(TARGET_ARM64)
if ((*((__int64*)&(tree->AsDblCon()->gtDconVal)) == 0) ||
emitter::emitIns_valid_imm_for_fmov(tree->AsDblCon()->gtDconVal))
{
costEx = 1;
costSz = 1;
}
else
{
costEx = IND_COST_EX;
costSz = 4;
}
#else
#error "Unknown TARGET"
#endif
}
break;
case GT_LCL_VAR:
level = 1;
if (gtIsLikelyRegVar(tree))
{
costEx = 1;
costSz = 1;
/* Sign-extend and zero-extend are more expensive to load */
if (lvaTable[tree->AsLclVar()->GetLclNum()].lvNormalizeOnLoad())
{
costEx += 1;
costSz += 1;
}
}
else
{
costEx = IND_COST_EX;
costSz = 2;
/* Sign-extend and zero-extend are more expensive to load */
if (varTypeIsSmall(tree->TypeGet()))
{
costEx += 1;
costSz += 1;
}
}
#if defined(TARGET_AMD64)
// increase costSz for floating point locals
if (isflt)
{
costSz += 1;
if (!gtIsLikelyRegVar(tree))
{
costSz += 1;
}
}
#endif
break;
case GT_CLS_VAR:
#ifdef TARGET_ARM
// We generate movw/movt/ldr
level = 1;
costEx = 3 + IND_COST_EX; // 6
costSz = 4 + 4 + 2; // 10
break;
#endif
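// On non-ARM targets GT_CLS_VAR falls through and is costed like GT_LCL_FLD.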
case GT_LCL_FLD:
level = 1;
costEx = IND_COST_EX;
costSz = 4;
if (varTypeIsSmall(tree->TypeGet()))
{
costEx += 1;
costSz += 1;
}
break;
case GT_LCL_FLD_ADDR:
case GT_LCL_VAR_ADDR:
level = 1;
costEx = 3;
costSz = 3;
break;
case GT_PHI_ARG:
case GT_ARGPLACE:
level = 0;
costEx = 0;
costSz = 0;
break;
default:
level = 1;
costEx = 1;
costSz = 1;
break;
}
goto DONE;
}
/* Is it a 'simple' unary/binary operator? */
if (kind & GTK_SMPOP)
{
int lvlb; // preference for op2
unsigned lvl2; // scratch variable
GenTree* op1 = tree->AsOp()->gtOp1;
GenTree* op2 = tree->gtGetOp2IfPresent();
costEx = 0;
costSz = 0;
if (tree->OperIsAddrMode())
{
if (op1 == nullptr)
{
op1 = op2;
op2 = nullptr;
}
}
/* Check for a nilary operator */
if (op1 == nullptr)
{
assert(op2 == nullptr);
level = 0;
goto DONE;
}
/* Is this a unary operator? */
if (op2 == nullptr)
{
/* Process the operand of the operator */
/* Most Unary ops have costEx of 1 */
costEx = 1;
costSz = 1;
level = gtSetEvalOrder(op1);
GenTreeIntrinsic* intrinsic;
/* Special handling for some operators */
switch (oper)
{
case GT_JTRUE:
costEx = 2;
costSz = 2;
break;
case GT_SWITCH:
costEx = 10;
costSz = 5;
break;
case GT_CAST:
#if defined(TARGET_ARM)
costEx = 1;
costSz = 1;
if (isflt || varTypeIsFloating(op1->TypeGet()))
{
costEx = 3;
costSz = 4;
}
#elif defined(TARGET_ARM64)
costEx = 1;
costSz = 2;
if (isflt || varTypeIsFloating(op1->TypeGet()))
{
costEx = 2;
costSz = 4;
}
#elif defined(TARGET_XARCH)
costEx = 1;
costSz = 2;
if (isflt || varTypeIsFloating(op1->TypeGet()))
{
/* casts involving floats always go through memory */
costEx = IND_COST_EX * 2;
costSz = 6;
}
#else
#error "Unknown TARGET"
#endif
/* Overflow casts are a lot more expensive */
if (tree->gtOverflow())
{
costEx += 6;
costSz += 6;
}
break;
case GT_NOP:
costEx = 0;
costSz = 0;
break;
case GT_INTRINSIC:
intrinsic = tree->AsIntrinsic();
// named intrinsic
assert(intrinsic->gtIntrinsicName != NI_Illegal);
// GT_INTRINSIC intrinsics Sin, Cos, Sqrt, Abs ... have higher costs.
// TODO: tune these costs target specific as some of these are
// target intrinsics and would cost less to generate code.
switch (intrinsic->gtIntrinsicName)
{
default:
assert(!"missing case for gtIntrinsicName");
costEx = 12;
costSz = 12;
break;
case NI_System_Math_Abs:
costEx = 5;
costSz = 15;
break;
case NI_System_Math_Acos:
case NI_System_Math_Acosh:
case NI_System_Math_Asin:
case NI_System_Math_Asinh:
case NI_System_Math_Atan:
case NI_System_Math_Atanh:
case NI_System_Math_Atan2:
case NI_System_Math_Cbrt:
case NI_System_Math_Ceiling:
case NI_System_Math_Cos:
case NI_System_Math_Cosh:
case NI_System_Math_Exp:
case NI_System_Math_Floor:
case NI_System_Math_FMod:
case NI_System_Math_FusedMultiplyAdd:
case NI_System_Math_ILogB:
case NI_System_Math_Log:
case NI_System_Math_Log2:
case NI_System_Math_Log10:
case NI_System_Math_Max:
case NI_System_Math_Min:
case NI_System_Math_Pow:
case NI_System_Math_Round:
case NI_System_Math_Sin:
case NI_System_Math_Sinh:
case NI_System_Math_Sqrt:
case NI_System_Math_Tan:
case NI_System_Math_Tanh:
case NI_System_Math_Truncate:
{
// We give intrinsics a large fixed execution cost because we'd like to CSE
// them, even if they are implemented by calls. This is different from modeling
// user calls since we never CSE user calls. We don't do this for target intrinsics,
// however, as they typically expand to a single instruction rather than a call.
if (IsIntrinsicImplementedByUserCall(intrinsic->gtIntrinsicName))
{
costEx = 36;
costSz = 4;
}
else
{
costEx = 3;
costSz = 4;
}
break;
}
case NI_System_Object_GetType:
// We give intrinsics a large fixed execution cost because we'd like to CSE
// them, even if they are implemented by calls. This is different from modeling
// user calls since we never CSE user calls.
costEx = 36;
costSz = 4;
break;
}
level++;
break;
case GT_NOT:
case GT_NEG:
// We need to ensure that -x is evaluated before x or else
// we get burned while adjusting genFPstkLevel in x*-x where
// the rhs x is the last use of the enregistered x.
//
// Even in the integer case we want to prefer to
// evaluate the side without the GT_NEG node, all other things
// being equal. Also a GT_NOT requires a scratch register
level++;
break;
case GT_ADDR:
costEx = 0;
costSz = 1;
// If we have a GT_ADDR of an GT_IND we can just copy the costs from indOp1
if (op1->OperGet() == GT_IND)
{
GenTree* indOp1 = op1->AsOp()->gtOp1;
costEx = indOp1->GetCostEx();
costSz = indOp1->GetCostSz();
}
break;
case GT_ARR_LENGTH:
level++;
/* Array length should cost the same as an indirection, which has a costEx of IND_COST_EX */
costEx = IND_COST_EX - 1;
costSz = 2;
break;
case GT_MKREFANY:
case GT_OBJ:
// We estimate the cost of a GT_OBJ or GT_MKREFANY to be two loads (GT_INDs)
costEx = 2 * IND_COST_EX;
costSz = 2 * 2;
break;
case GT_BOX:
// We estimate the cost of a GT_BOX to be two stores (GT_INDs)
costEx = 2 * IND_COST_EX;
costSz = 2 * 2;
break;
case GT_BLK:
case GT_IND:
/* An indirection should always have a non-zero level.
* Only constant leaf nodes have level 0.
*/
if (level == 0)
{
level = 1;
}
/* Indirections have a costEx of IND_COST_EX */
costEx = IND_COST_EX;
costSz = 2;
/* If we have to sign-extend or zero-extend, bump the cost */
if (varTypeIsSmall(tree->TypeGet()))
{
costEx += 1;
costSz += 1;
}
if (isflt)
{
if (tree->TypeGet() == TYP_DOUBLE)
{
costEx += 1;
}
#ifdef TARGET_ARM
costSz += 2;
#endif // TARGET_ARM
}
// Can we form an addressing mode with this indirection?
// TODO-CQ: Consider changing this to op1->gtEffectiveVal() to take into account
// addressing modes hidden under a comma node.
if (op1->gtOper == GT_ADD)
{
// See if we can form a complex addressing mode.
GenTree* addr = op1->gtEffectiveVal();
bool doAddrMode = true;
// Always use an addrMode for an array index indirection.
// TODO-1stClassStructs: Always do this, but first make sure it's
// done in Lowering as well.
if ((tree->gtFlags & GTF_IND_ARR_INDEX) == 0)
{
if (tree->TypeGet() == TYP_STRUCT)
{
doAddrMode = false;
}
else if (varTypeIsStruct(tree))
{
// This is a heuristic attempting to match prior behavior when indirections
// under a struct assignment would not be considered for addressing modes.
if (compCurStmt != nullptr)
{
GenTree* expr = compCurStmt->GetRootNode();
if ((expr->OperGet() == GT_ASG) &&
((expr->gtGetOp1() == tree) || (expr->gtGetOp2() == tree)))
{
doAddrMode = false;
}
}
}
}
#ifdef TARGET_ARM64
if (tree->gtFlags & GTF_IND_VOLATILE)
{
// For volatile store/loads when address is contained we always emit `dmb`
// if it's not - we emit one-way barriers i.e. ldar/stlr
doAddrMode = false;
}
#endif // TARGET_ARM64
if (doAddrMode && gtMarkAddrMode(addr, &costEx, &costSz, tree->TypeGet()))
{
goto DONE;
}
} // end if (op1->gtOper == GT_ADD)
else if (gtIsLikelyRegVar(op1))
{
/* Indirection of an enregistered LCL_VAR, don't increase costEx/costSz */
goto DONE;
}
#ifdef TARGET_XARCH
else if (op1->IsCnsIntOrI())
{
// Indirection of a CNS_INT, subtract 1 from costEx
// makes costEx 3 for x86 and 4 for amd64
//
costEx += (op1->GetCostEx() - 1);
costSz += op1->GetCostSz();
goto DONE;
}
#endif
break;
default:
break;
}
costEx += op1->GetCostEx();
costSz += op1->GetCostSz();
goto DONE;
}
/* Binary operator - check for certain special cases */
lvlb = 0;
/* Default Binary ops have a cost of 1,1 */
costEx = 1;
costSz = 1;
#ifdef TARGET_ARM
if (isflt)
{
costSz += 2;
}
#endif
#ifndef TARGET_64BIT
if (varTypeIsLong(op1->TypeGet()))
{
/* Operations on longs are more expensive */
costEx += 3;
costSz += 3;
}
#endif
switch (oper)
{
case GT_MOD:
case GT_UMOD:
/* Modulo by a power of 2 is easy */
if (op2->IsCnsIntOrI())
{
size_t ival = op2->AsIntConCommon()->IconValue();
if (ival > 0 && ival == genFindLowestBit(ival))
{
break;
}
}
FALLTHROUGH;
case GT_DIV:
case GT_UDIV:
if (isflt)
{
/* fp division is very expensive to execute */
costEx = 36; // TYP_DOUBLE
costSz += 3;
}
else
{
/* integer division is also very expensive */
costEx = 20;
costSz += 2;
// Encourage the first operand to be evaluated (into EAX/EDX) first
lvlb -= 3;
}
break;
case GT_MUL:
if (isflt)
{
/* FP multiplication instructions are more expensive */
costEx += 4;
costSz += 3;
}
else
{
/* Integer multiplication instructions are more expensive */
costEx += 3;
costSz += 2;
if (tree->gtOverflow())
{
/* Overflow checks are more expensive */
costEx += 3;
costSz += 3;
}
#ifdef TARGET_X86
if ((tree->gtType == TYP_LONG) || tree->gtOverflow())
{
/* We use imulEAX for TYP_LONG and overflow multiplications */
// Encourage the first operand to be evaluated (into EAX/EDX) first
lvlb -= 4;
/* The 64-bit imul instruction costs more */
costEx += 4;
}
#endif // TARGET_X86
}
break;
case GT_ADD:
case GT_SUB:
if (isflt)
{
/* FP instructions are a bit more expensive */
costEx += 4;
costSz += 3;
break;
}
/* Overflow checks are more expensive */
if (tree->gtOverflow())
{
costEx += 3;
costSz += 3;
}
break;
case GT_BOUNDS_CHECK:
costEx = 4; // cmp reg,reg and jae throw (not taken)
costSz = 7; // jump to cold section
break;
case GT_COMMA:
/* Comma tosses the result of the left operand */
gtSetEvalOrder(op1);
level = gtSetEvalOrder(op2);
/* GT_COMMA cost is the sum of op1 and op2 costs */
costEx = (op1->GetCostEx() + op2->GetCostEx());
costSz = (op1->GetCostSz() + op2->GetCostSz());
goto DONE;
case GT_COLON:
level = gtSetEvalOrder(op1);
lvl2 = gtSetEvalOrder(op2);
if (level < lvl2)
{
level = lvl2;
}
else if (level == lvl2)
{
level += 1;
}
costEx = op1->GetCostEx() + op2->GetCostEx();
costSz = op1->GetCostSz() + op2->GetCostSz();
goto DONE;
case GT_INDEX_ADDR:
costEx = 6; // cmp reg,reg; jae throw; mov reg, [addrmode] (not taken)
costSz = 9; // jump to cold section
break;
case GT_ASG:
/* Assignments need a bit of special handling */
/* Process the target */
level = gtSetEvalOrder(op1);
if (gtIsLikelyRegVar(op1))
{
assert(lvlb == 0);
lvl2 = gtSetEvalOrder(op2);
/* Assignment to an enregistered LCL_VAR */
costEx = op2->GetCostEx();
costSz = max(3, op2->GetCostSz()); // 3 is an estimate for a reg-reg assignment
goto DONE_OP1_AFTER_COST;
}
goto DONE_OP1;
default:
break;
}
/* Process the sub-operands */
level = gtSetEvalOrder(op1);
if (lvlb < 0)
{
level -= lvlb; // lvlb is negative, so this increases level
lvlb = 0;
}
DONE_OP1:
assert(lvlb >= 0);
lvl2 = gtSetEvalOrder(op2) + lvlb;
costEx += (op1->GetCostEx() + op2->GetCostEx());
costSz += (op1->GetCostSz() + op2->GetCostSz());
DONE_OP1_AFTER_COST:
bool bReverseInAssignment = false;
if (oper == GT_ASG && (!optValnumCSE_phase || optCSE_canSwap(op1, op2)))
{
GenTree* op1Val = op1;
// Skip over the GT_IND/GT_ADDR tree (if one exists)
//
if ((op1->gtOper == GT_IND) && (op1->AsOp()->gtOp1->gtOper == GT_ADDR))
{
op1Val = op1->AsOp()->gtOp1->AsOp()->gtOp1;
}
switch (op1Val->gtOper)
{
case GT_IND:
case GT_BLK:
case GT_OBJ:
{
// In an ASG(IND(addr), ...), the "IND" is a purely syntactic element;
// the actual indirection will only be realized at the point of the ASG
// itself. As such, we can discard any side effects "induced" by it in
// this logic.
//
// Note that for local "addr"s, liveness depends on seeing the defs and
// uses in correct order, and so we MUST reverse the ASG in that case.
//
GenTree* op1Addr = op1->AsIndir()->Addr();
if (op1Addr->IsLocalAddrExpr() || op1Addr->IsInvariant())
{
bReverseInAssignment = true;
tree->gtFlags |= GTF_REVERSE_OPS;
break;
}
if (op1Addr->gtFlags & GTF_ALL_EFFECT)
{
break;
}
// In case op2 assigns to a local var that is used in op1Val, we have to evaluate op1Val first.
if (op2->gtFlags & GTF_ASG)
{
break;
}
// If op2 is simple then evaluate op1 first
if (op2->OperKind() & GTK_LEAF)
{
break;
}
}
// fall through and set GTF_REVERSE_OPS
FALLTHROUGH;
case GT_LCL_VAR:
case GT_LCL_FLD:
case GT_CLS_VAR:
// We evaluate op2 before op1
bReverseInAssignment = true;
tree->gtFlags |= GTF_REVERSE_OPS;
break;
default:
break;
}
}
else if (GenTree::OperIsCompare(oper))
{
/* Float compares remove both operands from the FP stack */
/* Also FP comparison uses EAX for flags */
if (varTypeIsFloating(op1->TypeGet()))
{
level++;
lvl2++;
}
if ((tree->gtFlags & GTF_RELOP_JMP_USED) == 0)
{
/* Using a setcc instruction is more expensive */
costEx += 3;
}
}
/* Check for other interesting cases */
switch (oper)
{
case GT_LSH:
case GT_RSH:
case GT_RSZ:
case GT_ROL:
case GT_ROR:
/* Variable sized shifts are more expensive and use REG_SHIFT */
if (!op2->IsCnsIntOrI())
{
costEx += 3;
#ifndef TARGET_64BIT
// Variable sized LONG shifts require the use of a helper call
//
if (tree->gtType == TYP_LONG)
{
level += 5;
lvl2 += 5;
costEx += 3 * IND_COST_EX;
costSz += 4;
}
#endif // !TARGET_64BIT
}
break;
case GT_INTRINSIC:
switch (tree->AsIntrinsic()->gtIntrinsicName)
{
case NI_System_Math_Atan2:
case NI_System_Math_Pow:
// These math intrinsics are actually implemented by user calls.
// Increase the Sethi 'complexity' by two to reflect the argument
// register requirement.
level += 2;
break;
case NI_System_Math_Max:
case NI_System_Math_Min:
level++;
break;
default:
assert(!"Unknown binary GT_INTRINSIC operator");
break;
}
break;
default:
break;
}
/* We need to evaluate constants later as many places in codegen
can't handle op1 being a constant. This is normally naturally
enforced as constants have the lowest level of 0. However,
sometimes we end up with a tree like "cns1 < nop(cns2)". In
such cases, both sides have a level of 0. So encourage constants
to be evaluated last in such cases */
if ((level == 0) && (level == lvl2) && op1->OperIsConst() &&
(tree->OperIsCommutative() || tree->OperIsCompare()))
{
lvl2++;
}
/* We try to swap operands if the second one is more expensive */
bool tryToSwap;
GenTree* opA;
GenTree* opB;
if (tree->gtFlags & GTF_REVERSE_OPS)
{
opA = op2;
opB = op1;
}
else
{
opA = op1;
opB = op2;
}
if (fgOrder == FGOrderLinear)
{
// Don't swap anything if we're in linear order; we're really just interested in the costs.
tryToSwap = false;
}
else if (bReverseInAssignment)
{
// Assignments are special; the GTF_REVERSE_OPS flag, where applicable,
// was already set above.
tryToSwap = false;
}
else if ((oper == GT_INTRINSIC) && IsIntrinsicImplementedByUserCall(tree->AsIntrinsic()->gtIntrinsicName))
{
// We do not swap operand execution order for intrinsics that are implemented by user calls
// because of trickiness around ensuring the execution order does not change during rationalization.
tryToSwap = false;
}
else if (oper == GT_BOUNDS_CHECK)
{
// Bounds check nodes used to not be binary, thus GTF_REVERSE_OPS was
// not enabled for them. This condition preserves that behavior.
// Additionally, CQ analysis shows that enabling GTF_REVERSE_OPS
// for these nodes leads to mixed results at best.
tryToSwap = false;
}
else
{
if (tree->gtFlags & GTF_REVERSE_OPS)
{
tryToSwap = (level > lvl2);
}
else
{
tryToSwap = (level < lvl2);
}
// Try to force extra swapping when in the stress mode:
if (compStressCompile(STRESS_REVERSE_FLAG, 60) && ((tree->gtFlags & GTF_REVERSE_OPS) == 0) &&
!op2->OperIsConst())
{
tryToSwap = true;
}
}
if (tryToSwap)
{
bool canSwap = gtCanSwapOrder(opA, opB);
if (canSwap)
{
/* Can we swap the order by commuting the operands? */
switch (oper)
{
case GT_EQ:
case GT_NE:
case GT_LT:
case GT_LE:
case GT_GE:
case GT_GT:
if (GenTree::SwapRelop(oper) != oper)
{
tree->SetOper(GenTree::SwapRelop(oper), GenTree::PRESERVE_VN);
}
FALLTHROUGH;
case GT_ADD:
case GT_MUL:
case GT_OR:
case GT_XOR:
case GT_AND:
/* Swap the operands */
tree->AsOp()->gtOp1 = op2;
tree->AsOp()->gtOp2 = op1;
break;
case GT_QMARK:
case GT_COLON:
case GT_MKREFANY:
break;
default:
/* Mark the operand's evaluation order to be swapped */
if (tree->gtFlags & GTF_REVERSE_OPS)
{
tree->gtFlags &= ~GTF_REVERSE_OPS;
}
else
{
tree->gtFlags |= GTF_REVERSE_OPS;
}
break;
}
}
}
/* Swap the level counts */
if (tree->gtFlags & GTF_REVERSE_OPS)
{
unsigned tmpl;
tmpl = level;
level = lvl2;
lvl2 = tmpl;
}
/* Compute the sethi number for this binary operator */
if (level < 1)
{
level = lvl2;
}
else if (level == lvl2)
{
level += 1;
}
goto DONE;
}
/* See what kind of a special operator we have here */
switch (oper)
{
unsigned lvl2; // Scratch variable
case GT_CALL:
assert(tree->gtFlags & GTF_CALL);
level = 0;
costEx = 5;
costSz = 2;
GenTreeCall* call;
call = tree->AsCall();
/* Evaluate the 'this' argument, if present */
if (tree->AsCall()->gtCallThisArg != nullptr)
{
GenTree* thisVal = tree->AsCall()->gtCallThisArg->GetNode();
lvl2 = gtSetEvalOrder(thisVal);
if (level < lvl2)
{
level = lvl2;
}
costEx += thisVal->GetCostEx();
costSz += thisVal->GetCostSz() + 1;
}
/* Evaluate the arguments, right to left */
if (call->gtCallArgs != nullptr)
{
const bool lateArgs = false;
lvl2 = gtSetCallArgsOrder(call->Args(), lateArgs, &costEx, &costSz);
if (level < lvl2)
{
level = lvl2;
}
}
/* Evaluate the temp register arguments list
* This is a "hidden" list and its only purpose is to
* extend the life of temps until we make the call */
if (call->gtCallLateArgs != nullptr)
{
const bool lateArgs = true;
lvl2 = gtSetCallArgsOrder(call->LateArgs(), lateArgs, &costEx, &costSz);
if (level < lvl2)
{
level = lvl2;
}
}
if (call->gtCallType == CT_INDIRECT)
{
// pinvoke-calli cookie is a constant, or constant indirection
assert(call->gtCallCookie == nullptr || call->gtCallCookie->gtOper == GT_CNS_INT ||
call->gtCallCookie->gtOper == GT_IND);
GenTree* indirect = call->gtCallAddr;
lvl2 = gtSetEvalOrder(indirect);
if (level < lvl2)
{
level = lvl2;
}
costEx += indirect->GetCostEx() + IND_COST_EX;
costSz += indirect->GetCostSz();
}
else
{
if (call->IsVirtual())
{
GenTree* controlExpr = call->gtControlExpr;
if (controlExpr != nullptr)
{
lvl2 = gtSetEvalOrder(controlExpr);
if (level < lvl2)
{
level = lvl2;
}
costEx += controlExpr->GetCostEx();
costSz += controlExpr->GetCostSz();
}
}
#ifdef TARGET_ARM
if (call->IsVirtualStub())
{
// We generate movw/movt/ldr
costEx += (1 + IND_COST_EX);
costSz += 8;
if (call->gtCallMoreFlags & GTF_CALL_M_VIRTSTUB_REL_INDIRECT)
{
// Must use R12 for the ldr target -- REG_JUMP_THUNK_PARAM
costSz += 2;
}
}
else if (!opts.jitFlags->IsSet(JitFlags::JIT_FLAG_PREJIT))
{
costEx += 2;
costSz += 6;
}
costSz += 2;
#endif
#ifdef TARGET_XARCH
costSz += 3;
#endif
}
level += 1;
/* Virtual calls are a bit more expensive */
if (call->IsVirtual())
{
costEx += 2 * IND_COST_EX;
costSz += 2;
}
level += 5;
costEx += 3 * IND_COST_EX;
break;
#if defined(FEATURE_SIMD) || defined(FEATURE_HW_INTRINSICS)
#if defined(FEATURE_SIMD)
case GT_SIMD:
#endif
#if defined(FEATURE_HW_INTRINSICS)
case GT_HWINTRINSIC:
#endif
return gtSetMultiOpOrder(tree->AsMultiOp());
#endif // defined(FEATURE_SIMD) || defined(FEATURE_HW_INTRINSICS)
case GT_ARR_ELEM:
{
GenTreeArrElem* arrElem = tree->AsArrElem();
level = gtSetEvalOrder(arrElem->gtArrObj);
costEx = arrElem->gtArrObj->GetCostEx();
costSz = arrElem->gtArrObj->GetCostSz();
for (unsigned dim = 0; dim < arrElem->gtArrRank; dim++)
{
lvl2 = gtSetEvalOrder(arrElem->gtArrInds[dim]);
if (level < lvl2)
{
level = lvl2;
}
costEx += arrElem->gtArrInds[dim]->GetCostEx();
costSz += arrElem->gtArrInds[dim]->GetCostSz();
}
level += arrElem->gtArrRank;
costEx += 2 + (arrElem->gtArrRank * (IND_COST_EX + 1));
costSz += 2 + (arrElem->gtArrRank * 2);
}
break;
case GT_ARR_OFFSET:
level = gtSetEvalOrder(tree->AsArrOffs()->gtOffset);
costEx = tree->AsArrOffs()->gtOffset->GetCostEx();
costSz = tree->AsArrOffs()->gtOffset->GetCostSz();
lvl2 = gtSetEvalOrder(tree->AsArrOffs()->gtIndex);
level = max(level, lvl2);
costEx += tree->AsArrOffs()->gtIndex->GetCostEx();
costSz += tree->AsArrOffs()->gtIndex->GetCostSz();
lvl2 = gtSetEvalOrder(tree->AsArrOffs()->gtArrObj);
level = max(level, lvl2);
costEx += tree->AsArrOffs()->gtArrObj->GetCostEx();
costSz += tree->AsArrOffs()->gtArrObj->GetCostSz();
break;
case GT_PHI:
for (GenTreePhi::Use& use : tree->AsPhi()->Uses())
{
lvl2 = gtSetEvalOrder(use.GetNode());
// PHI args should always have cost 0 and level 0
assert(lvl2 == 0);
assert(use.GetNode()->GetCostEx() == 0);
assert(use.GetNode()->GetCostSz() == 0);
}
// Give it a level of 2, just to be sure that it's greater than the LHS of
// the parent assignment and the PHI gets evaluated first in linear order.
// See also SsaBuilder::InsertPhi and SsaBuilder::AddPhiArg.
level = 2;
costEx = 0;
costSz = 0;
break;
case GT_FIELD_LIST:
level = 0;
costEx = 0;
costSz = 0;
for (GenTreeFieldList::Use& use : tree->AsFieldList()->Uses())
{
unsigned opLevel = gtSetEvalOrder(use.GetNode());
level = max(level, opLevel);
costEx += use.GetNode()->GetCostEx();
costSz += use.GetNode()->GetCostSz();
}
break;
case GT_CMPXCHG:
level = gtSetEvalOrder(tree->AsCmpXchg()->gtOpLocation);
costSz = tree->AsCmpXchg()->gtOpLocation->GetCostSz();
lvl2 = gtSetEvalOrder(tree->AsCmpXchg()->gtOpValue);
if (level < lvl2)
{
level = lvl2;
}
costSz += tree->AsCmpXchg()->gtOpValue->GetCostSz();
lvl2 = gtSetEvalOrder(tree->AsCmpXchg()->gtOpComparand);
if (level < lvl2)
{
level = lvl2;
}
costSz += tree->AsCmpXchg()->gtOpComparand->GetCostSz();
costEx = MAX_COST; // Seriously, what could be more expensive than lock cmpxchg?
costSz += 5; // size of lock cmpxchg [reg+C], reg
break;
case GT_STORE_DYN_BLK:
level = gtSetEvalOrder(tree->AsStoreDynBlk()->Addr());
costEx = tree->AsStoreDynBlk()->Addr()->GetCostEx();
costSz = tree->AsStoreDynBlk()->Addr()->GetCostSz();
lvl2 = gtSetEvalOrder(tree->AsStoreDynBlk()->Data());
level = max(level, lvl2);
costEx += tree->AsStoreDynBlk()->Data()->GetCostEx();
costSz += tree->AsStoreDynBlk()->Data()->GetCostSz();
lvl2 = gtSetEvalOrder(tree->AsStoreDynBlk()->gtDynamicSize);
level = max(level, lvl2);
costEx += tree->AsStoreDynBlk()->gtDynamicSize->GetCostEx();
costSz += tree->AsStoreDynBlk()->gtDynamicSize->GetCostSz();
break;
default:
JITDUMP("unexpected operator in this tree:\n");
DISPTREE(tree);
NO_WAY("unexpected operator");
}
DONE:
// Some path through this function must have set the costs.
assert(costEx != -1);
assert(costSz != -1);
tree->SetCosts(costEx, costSz);
return level;
}
#ifdef _PREFAST_
#pragma warning(pop)
#endif
#ifdef DEBUG
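//------------------------------------------------------------------------
// OperSupportsReverseOpEvalOrder: Whether this node's operand evaluation
//    order can be marked as reversed (GTF_REVERSE_OPS / MultiOp reverse-op).
//
// Returns true for binary operators with two non-null operands (excluding
// GT_COMMA, GT_BOUNDS_CHECK and intrinsics implemented by user calls) and
// for multi-op nodes with exactly two operands.
//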
bool GenTree::OperSupportsReverseOpEvalOrder(Compiler* comp) const
{
if (OperIsBinary())
{
if ((AsOp()->gtGetOp1() == nullptr) || (AsOp()->gtGetOp2() == nullptr))
{
return false;
}
if (OperIs(GT_COMMA, GT_BOUNDS_CHECK))
{
return false;
}
if (OperIs(GT_INTRINSIC))
{
return !comp->IsIntrinsicImplementedByUserCall(AsIntrinsic()->gtIntrinsicName);
}
return true;
}
#if defined(FEATURE_SIMD) || defined(FEATURE_HW_INTRINSICS)
if (OperIsMultiOp())
{
return AsMultiOp()->GetOperandCount() == 2;
}
#endif // FEATURE_SIMD || FEATURE_HW_INTRINSICS
return false;
}
#endif // DEBUG
/*****************************************************************************
*
* If the given tree is an integer constant that can be used
* in a scaled index address mode as a multiplier (e.g. "[4*index]"), then return
* the scale factor: 2, 4, or 8. Otherwise, return 0. Note that we never return 1,
* to match the behavior of GetScaleIndexShf().
*/
unsigned GenTree::GetScaleIndexMul()
{
if (IsCnsIntOrI() && jitIsScaleIndexMul(AsIntConCommon()->IconValue()) && AsIntConCommon()->IconValue() != 1)
{
return (unsigned)AsIntConCommon()->IconValue();
}
return 0;
}
/*****************************************************************************
*
* If the given tree is the right-hand side of a left shift (that is,
* 'y' in the tree 'x' << 'y'), and it is an integer constant that can be used
* in a scaled index address mode as a multiplier (e.g. "[4*index]"), then return
* the scale factor: 2, 4, or 8. Otherwise, return 0.
*/
unsigned GenTree::GetScaleIndexShf()
{
if (IsCnsIntOrI() && jitIsScaleIndexShift(AsIntConCommon()->IconValue()))
{
return (unsigned)(1 << AsIntConCommon()->IconValue());
}
return 0;
}
/*****************************************************************************
*
* If the given tree is a scaled index (i.e. "op * 4" or "op << 2"), returns
* the multiplier: 2, 4, or 8; otherwise returns 0. Note that "1" is never
* returned.
*/
unsigned GenTree::GetScaledIndex()
{
// With !opts.OptEnabled(CLFLG_CONSTANTFOLD), we can have
// CNS_INT * CNS_INT
//
if (AsOp()->gtOp1->IsCnsIntOrI())
{
return 0;
}
switch (gtOper)
{
case GT_MUL:
return AsOp()->gtOp2->GetScaleIndexMul();
case GT_LSH:
return AsOp()->gtOp2->GetScaleIndexShf();
default:
assert(!"GenTree::GetScaledIndex() called with illegal gtOper");
break;
}
return 0;
}
//------------------------------------------------------------------------
// TryGetUse: Get the use edge for an operand of this tree.
//
// Arguments:
// operand - the node to find the use for
// pUse - [out] parameter for the use
//
// Return Value:
// Whether "operand" is a child of this node. If it is, "*pUse" is set,
// allowing for the replacement of "operand" with some other node.
//
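// A minimal usage sketch (hypothetical caller code): locate the use edge for a
// known child and splice in a replacement node.
//
//     GenTree** use = nullptr;
//     if (parent->TryGetUse(oldOperand, &use))
//     {
//         *use = newOperand; // or: parent->ReplaceOperand(use, newOperand);
//     }
//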
bool GenTree::TryGetUse(GenTree* operand, GenTree*** pUse)
{
assert(operand != nullptr);
assert(pUse != nullptr);
switch (OperGet())
{
// Leaf nodes
case GT_LCL_VAR:
case GT_LCL_FLD:
case GT_LCL_VAR_ADDR:
case GT_LCL_FLD_ADDR:
case GT_CATCH_ARG:
case GT_LABEL:
case GT_FTN_ADDR:
case GT_RET_EXPR:
case GT_CNS_INT:
case GT_CNS_LNG:
case GT_CNS_DBL:
case GT_CNS_STR:
case GT_MEMORYBARRIER:
case GT_JMP:
case GT_JCC:
case GT_SETCC:
case GT_NO_OP:
case GT_START_NONGC:
case GT_START_PREEMPTGC:
case GT_PROF_HOOK:
#if !defined(FEATURE_EH_FUNCLETS)
case GT_END_LFIN:
#endif // !FEATURE_EH_FUNCLETS
case GT_PHI_ARG:
case GT_JMPTABLE:
case GT_CLS_VAR:
case GT_CLS_VAR_ADDR:
case GT_ARGPLACE:
case GT_PHYSREG:
case GT_EMITNOP:
case GT_PINVOKE_PROLOG:
case GT_PINVOKE_EPILOG:
case GT_IL_OFFSET:
return false;
// Standard unary operators
case GT_STORE_LCL_VAR:
case GT_STORE_LCL_FLD:
case GT_NOT:
case GT_NEG:
case GT_COPY:
case GT_RELOAD:
case GT_ARR_LENGTH:
case GT_CAST:
case GT_BITCAST:
case GT_CKFINITE:
case GT_LCLHEAP:
case GT_ADDR:
case GT_IND:
case GT_OBJ:
case GT_BLK:
case GT_BOX:
case GT_ALLOCOBJ:
case GT_RUNTIMELOOKUP:
case GT_INIT_VAL:
case GT_JTRUE:
case GT_SWITCH:
case GT_NULLCHECK:
case GT_PUTARG_REG:
case GT_PUTARG_STK:
case GT_PUTARG_TYPE:
case GT_RETURNTRAP:
case GT_NOP:
case GT_RETURN:
case GT_RETFILT:
case GT_BSWAP:
case GT_BSWAP16:
case GT_KEEPALIVE:
case GT_INC_SATURATE:
if (operand == this->AsUnOp()->gtOp1)
{
*pUse = &this->AsUnOp()->gtOp1;
return true;
}
return false;
// Variadic nodes
#if FEATURE_ARG_SPLIT
case GT_PUTARG_SPLIT:
if (this->AsUnOp()->gtOp1->gtOper == GT_FIELD_LIST)
{
return this->AsUnOp()->gtOp1->TryGetUse(operand, pUse);
}
if (operand == this->AsUnOp()->gtOp1)
{
*pUse = &this->AsUnOp()->gtOp1;
return true;
}
return false;
#endif // FEATURE_ARG_SPLIT
#if defined(FEATURE_SIMD) || defined(FEATURE_HW_INTRINSICS)
#if defined(FEATURE_SIMD)
case GT_SIMD:
#endif
#if defined(FEATURE_HW_INTRINSICS)
case GT_HWINTRINSIC:
#endif
for (GenTree** opUse : this->AsMultiOp()->UseEdges())
{
if (*opUse == operand)
{
*pUse = opUse;
return true;
}
}
return false;
#endif // defined(FEATURE_SIMD) || defined(FEATURE_HW_INTRINSICS)
// Special nodes
case GT_PHI:
for (GenTreePhi::Use& phiUse : AsPhi()->Uses())
{
if (phiUse.GetNode() == operand)
{
*pUse = &phiUse.NodeRef();
return true;
}
}
return false;
case GT_FIELD_LIST:
for (GenTreeFieldList::Use& fieldUse : AsFieldList()->Uses())
{
if (fieldUse.GetNode() == operand)
{
*pUse = &fieldUse.NodeRef();
return true;
}
}
return false;
case GT_CMPXCHG:
{
GenTreeCmpXchg* const cmpXchg = this->AsCmpXchg();
if (operand == cmpXchg->gtOpLocation)
{
*pUse = &cmpXchg->gtOpLocation;
return true;
}
if (operand == cmpXchg->gtOpValue)
{
*pUse = &cmpXchg->gtOpValue;
return true;
}
if (operand == cmpXchg->gtOpComparand)
{
*pUse = &cmpXchg->gtOpComparand;
return true;
}
return false;
}
case GT_ARR_ELEM:
{
GenTreeArrElem* const arrElem = this->AsArrElem();
if (operand == arrElem->gtArrObj)
{
*pUse = &arrElem->gtArrObj;
return true;
}
for (unsigned i = 0; i < arrElem->gtArrRank; i++)
{
if (operand == arrElem->gtArrInds[i])
{
*pUse = &arrElem->gtArrInds[i];
return true;
}
}
return false;
}
case GT_ARR_OFFSET:
{
GenTreeArrOffs* const arrOffs = this->AsArrOffs();
if (operand == arrOffs->gtOffset)
{
*pUse = &arrOffs->gtOffset;
return true;
}
if (operand == arrOffs->gtIndex)
{
*pUse = &arrOffs->gtIndex;
return true;
}
if (operand == arrOffs->gtArrObj)
{
*pUse = &arrOffs->gtArrObj;
return true;
}
return false;
}
case GT_STORE_DYN_BLK:
{
GenTreeStoreDynBlk* const dynBlock = this->AsStoreDynBlk();
if (operand == dynBlock->gtOp1)
{
*pUse = &dynBlock->gtOp1;
return true;
}
if (operand == dynBlock->gtOp2)
{
*pUse = &dynBlock->gtOp2;
return true;
}
if (operand == dynBlock->gtDynamicSize)
{
*pUse = &dynBlock->gtDynamicSize;
return true;
}
return false;
}
case GT_CALL:
{
GenTreeCall* const call = this->AsCall();
if ((call->gtCallThisArg != nullptr) && (operand == call->gtCallThisArg->GetNode()))
{
*pUse = &call->gtCallThisArg->NodeRef();
return true;
}
if (operand == call->gtControlExpr)
{
*pUse = &call->gtControlExpr;
return true;
}
if (call->gtCallType == CT_INDIRECT)
{
if (operand == call->gtCallCookie)
{
*pUse = &call->gtCallCookie;
return true;
}
if (operand == call->gtCallAddr)
{
*pUse = &call->gtCallAddr;
return true;
}
}
for (GenTreeCall::Use& argUse : call->Args())
{
if (argUse.GetNode() == operand)
{
*pUse = &argUse.NodeRef();
return true;
}
}
for (GenTreeCall::Use& argUse : call->LateArgs())
{
if (argUse.GetNode() == operand)
{
*pUse = &argUse.NodeRef();
return true;
}
}
return false;
}
// Binary nodes
default:
assert(this->OperIsBinary());
return TryGetUseBinOp(operand, pUse);
}
}
bool GenTree::TryGetUseBinOp(GenTree* operand, GenTree*** pUse)
{
assert(operand != nullptr);
assert(pUse != nullptr);
assert(this->OperIsBinary());
GenTreeOp* const binOp = this->AsOp();
if (operand == binOp->gtOp1)
{
*pUse = &binOp->gtOp1;
return true;
}
if (operand == binOp->gtOp2)
{
*pUse = &binOp->gtOp2;
return true;
}
return false;
}
//------------------------------------------------------------------------
// GenTree::ReplaceOperand:
// Replace a given operand to this node with a new operand. If the
// current node is a call node, this will also update the call
// argument table if necessary.
//
// Arguments:
// useEdge - the use edge that points to the operand to be replaced.
// replacement - the replacement node.
//
void GenTree::ReplaceOperand(GenTree** useEdge, GenTree* replacement)
{
assert(useEdge != nullptr);
assert(replacement != nullptr);
assert(TryGetUse(*useEdge, &useEdge));
if (OperGet() == GT_CALL)
{
AsCall()->ReplaceCallOperand(useEdge, replacement);
}
else
{
*useEdge = replacement;
}
}
//------------------------------------------------------------------------
// gtGetParent: Get the parent of this node, and optionally capture the
// pointer to the child so that it can be modified.
//
// Arguments:
// pUse - A pointer to a GenTree** (yes, that's three
// levels, i.e. GenTree ***), which if non-null,
// will be set to point to the field in the parent
// that points to this node.
//
// Return value
// The parent of this node.
//
// Notes:
// This requires that the execution order must be defined (i.e. gtSetEvalOrder() has been called).
// To enable the child to be replaced, it accepts an argument, "pUse", that, if non-null,
// will be set to point to the child pointer in the parent that points to this node.
//
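// A minimal usage sketch (hypothetical caller code), assuming gtSetEvalOrder
// has already linked the execution order:
//
//     GenTree** use  = nullptr;
//     GenTree*  user = node->gtGetParent(&use);
//     if ((user != nullptr) && (use != nullptr))
//     {
//         *use = replacement;
//     }
//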
GenTree* GenTree::gtGetParent(GenTree*** pUse)
{
// Find the parent node; it must be after this node in the execution order.
GenTree* user;
GenTree** use = nullptr;
for (user = gtNext; user != nullptr; user = user->gtNext)
{
if (user->TryGetUse(this, &use))
{
break;
}
}
if (pUse != nullptr)
{
*pUse = use;
}
return user;
}
//-------------------------------------------------------------------------
// gtRetExprVal - walk back through GT_RET_EXPRs
//
// Arguments:
// pbbFlags - out-parameter that is set to the flags of the basic block
// containing the inlinee return value. The value is 0
// for unsuccessful inlines.
//
// Returns:
// tree representing return value from a successful inline,
// or original call for failed or yet to be determined inline.
//
// Notes:
// Multi-level inlines can form chains of GT_RET_EXPRs.
// This method walks back to the root of the chain.
//
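// For example, with nested inlines the chain may look like
// RET_EXPR -> RET_EXPR -> <inlinee return value or original CALL>;
// this method returns the final non-RET_EXPR node in that chain.
//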
GenTree* GenTree::gtRetExprVal(BasicBlockFlags* pbbFlags /* = nullptr */)
{
GenTree* retExprVal = this;
BasicBlockFlags bbFlags = BBF_EMPTY;
assert(!retExprVal->OperIs(GT_PUTARG_TYPE));
while (retExprVal->OperIs(GT_RET_EXPR))
{
const GenTreeRetExpr* retExpr = retExprVal->AsRetExpr();
bbFlags = retExpr->bbFlags;
retExprVal = retExpr->gtInlineCandidate;
}
if (pbbFlags != nullptr)
{
*pbbFlags = bbFlags;
}
return retExprVal;
}
//------------------------------------------------------------------------------
// OperRequiresAsgFlag : Check whether the operation requires GTF_ASG flag regardless
// of the children's flags.
//
bool GenTree::OperRequiresAsgFlag()
{
if (OperIs(GT_ASG, GT_STORE_DYN_BLK) ||
OperIs(GT_XADD, GT_XORR, GT_XAND, GT_XCHG, GT_LOCKADD, GT_CMPXCHG, GT_MEMORYBARRIER))
{
return true;
}
#ifdef FEATURE_HW_INTRINSICS
if (gtOper == GT_HWINTRINSIC)
{
GenTreeHWIntrinsic* hwIntrinsicNode = this->AsHWIntrinsic();
if (hwIntrinsicNode->OperIsMemoryStore())
{
// A MemoryStore operation is an assignment
return true;
}
}
#endif // FEATURE_HW_INTRINSICS
return false;
}
//------------------------------------------------------------------------------
// OperRequiresCallFlag : Check whether the operation requires GTF_CALL flag regardless
// of the children's flags.
//
bool GenTree::OperRequiresCallFlag(Compiler* comp)
{
switch (gtOper)
{
case GT_CALL:
return true;
case GT_KEEPALIVE:
return true;
case GT_INTRINSIC:
return comp->IsIntrinsicImplementedByUserCall(this->AsIntrinsic()->gtIntrinsicName);
#if FEATURE_FIXED_OUT_ARGS && !defined(TARGET_64BIT)
case GT_LSH:
case GT_RSH:
case GT_RSZ:
// Variable shifts of a long end up being helper calls, so mark the tree as such in morph.
// This is potentially too conservative, since they'll get treated as having side effects.
// It is important to mark them as calls so if they are part of an argument list,
// they will get sorted and processed properly (for example, it is important to handle
// all nested calls before putting struct arguments in the argument registers). We
// could mark the trees just before argument processing, but it would require a full
// tree walk of the argument tree, so we just do it when morphing, instead, even though we'll
// mark non-argument trees (that will still get converted to calls, anyway).
return (this->TypeGet() == TYP_LONG) && (gtGetOp2()->OperGet() != GT_CNS_INT);
#endif // FEATURE_FIXED_OUT_ARGS && !TARGET_64BIT
default:
return false;
}
}
//------------------------------------------------------------------------------
// OperIsImplicitIndir : Check whether the operation contains an implicit
// indirection.
// Arguments:
// this - a GenTree node
//
// Return Value:
// True if the given node contains an implicit indirection
//
// Note that for the [HW]INTRINSIC nodes we have to examine the
// details of the node to determine its result.
//
bool GenTree::OperIsImplicitIndir() const
{
switch (gtOper)
{
case GT_LOCKADD:
case GT_XORR:
case GT_XAND:
case GT_XADD:
case GT_XCHG:
case GT_CMPXCHG:
case GT_BLK:
case GT_OBJ:
case GT_STORE_BLK:
case GT_STORE_OBJ:
case GT_STORE_DYN_BLK:
case GT_BOX:
case GT_ARR_INDEX:
case GT_ARR_ELEM:
case GT_ARR_OFFSET:
return true;
case GT_INTRINSIC:
return AsIntrinsic()->gtIntrinsicName == NI_System_Object_GetType;
#ifdef FEATURE_SIMD
case GT_SIMD:
{
return AsSIMD()->OperIsMemoryLoad();
}
#endif // FEATURE_SIMD
#ifdef FEATURE_HW_INTRINSICS
case GT_HWINTRINSIC:
{
return AsHWIntrinsic()->OperIsMemoryLoadOrStore();
}
#endif // FEATURE_HW_INTRINSICS
default:
return false;
}
}
//------------------------------------------------------------------------------
// OperMayThrow : Check whether the operation may throw.
//
//
// Arguments:
// comp - Compiler instance
//
// Return Value:
// True if the given operator may cause an exception
bool GenTree::OperMayThrow(Compiler* comp)
{
GenTree* op;
switch (gtOper)
{
case GT_MOD:
case GT_DIV:
case GT_UMOD:
case GT_UDIV:
/* Division with a non-zero, non-minus-one constant does not throw an exception */
op = AsOp()->gtOp2;
if (varTypeIsFloating(op->TypeGet()))
{
return false; // Floating point division does not throw.
}
// For integers only division by 0 or by -1 can throw
if (op->IsIntegralConst() && !op->IsIntegralConst(0) && !op->IsIntegralConst(-1))
{
return false;
}
return true;
case GT_INTRINSIC:
// If this is an intrinsic that represents the object.GetType(), it can throw a NullReferenceException.
// Currently, this is the only intrinsic that can throw an exception.
return AsIntrinsic()->gtIntrinsicName == NI_System_Object_GetType;
case GT_CALL:
CorInfoHelpFunc helper;
helper = comp->eeGetHelperNum(this->AsCall()->gtCallMethHnd);
return ((helper == CORINFO_HELP_UNDEF) || !comp->s_helperCallProperties.NoThrow(helper));
case GT_IND:
case GT_BLK:
case GT_OBJ:
case GT_NULLCHECK:
case GT_STORE_BLK:
case GT_STORE_DYN_BLK:
return (((this->gtFlags & GTF_IND_NONFAULTING) == 0) && comp->fgAddrCouldBeNull(this->AsIndir()->Addr()));
case GT_ARR_LENGTH:
return (((this->gtFlags & GTF_IND_NONFAULTING) == 0) &&
comp->fgAddrCouldBeNull(this->AsArrLen()->ArrRef()));
case GT_ARR_ELEM:
return comp->fgAddrCouldBeNull(this->AsArrElem()->gtArrObj);
case GT_FIELD:
{
GenTree* fldObj = this->AsField()->GetFldObj();
if (fldObj != nullptr)
{
return comp->fgAddrCouldBeNull(fldObj);
}
return false;
}
case GT_BOUNDS_CHECK:
case GT_ARR_INDEX:
case GT_ARR_OFFSET:
case GT_LCLHEAP:
case GT_CKFINITE:
case GT_INDEX_ADDR:
return true;
#ifdef FEATURE_HW_INTRINSICS
case GT_HWINTRINSIC:
{
GenTreeHWIntrinsic* hwIntrinsicNode = this->AsHWIntrinsic();
assert(hwIntrinsicNode != nullptr);
if (hwIntrinsicNode->OperIsMemoryLoadOrStore())
{
// This operation contains an implicit indirection
// it could throw a null reference exception.
//
return true;
}
break;
}
#endif // FEATURE_HW_INTRINSICS
default:
break;
}
/* Overflow arithmetic operations also throw exceptions */
if (gtOverflowEx())
{
return true;
}
return false;
}
//-----------------------------------------------------------------------------------
// GetFieldCount: Return the register count for a multi-reg lclVar.
//
// Arguments:
// compiler - the current Compiler instance.
//
// Return Value:
// Returns the number of registers defined by this node.
//
// Notes:
// This must be a multireg lclVar.
//
unsigned int GenTreeLclVar::GetFieldCount(Compiler* compiler) const
{
assert(IsMultiReg());
LclVarDsc* varDsc = compiler->lvaGetDesc(GetLclNum());
return varDsc->lvFieldCnt;
}
//-----------------------------------------------------------------------------------
// GetFieldTypeByIndex: Get a specific register's type, based on regIndex, that is produced
// by this multi-reg node.
//
// Arguments:
// compiler - the current Compiler instance.
// idx - which register type to return.
//
// Return Value:
// The register type assigned to this index for this node.
//
// Notes:
// This must be a multireg lclVar and 'idx' must be a valid index for this node.
//
var_types GenTreeLclVar::GetFieldTypeByIndex(Compiler* compiler, unsigned idx)
{
assert(IsMultiReg());
LclVarDsc* varDsc = compiler->lvaGetDesc(GetLclNum());
LclVarDsc* fieldVarDsc = compiler->lvaGetDesc(varDsc->lvFieldLclStart + idx);
assert(fieldVarDsc->TypeGet() != TYP_STRUCT); // Don't expect struct fields.
return fieldVarDsc->TypeGet();
}
#if DEBUGGABLE_GENTREE
// static
GenTree::VtablePtr GenTree::s_vtablesForOpers[] = {nullptr};
GenTree::VtablePtr GenTree::s_vtableForOp = nullptr;
GenTree::VtablePtr GenTree::GetVtableForOper(genTreeOps oper)
{
noway_assert(oper < GT_COUNT);
// First, check a cache.
if (s_vtablesForOpers[oper] != nullptr)
{
return s_vtablesForOpers[oper];
}
// Otherwise, look up the correct vtable entry. Note that we want the most derived GenTree subtype
// for an oper. E.g., GT_LCL_VAR is defined in GTSTRUCT_3 as GenTreeLclVar and in GTSTRUCT_N as
// GenTreeLclVarCommon. We want the GenTreeLclVar vtable, since nothing should actually be
// instantiated as a GenTreeLclVarCommon.
VtablePtr res = nullptr;
switch (oper)
{
// clang-format off
#define GTSTRUCT_0(nm, tag) /*handle explicitly*/
#define GTSTRUCT_1(nm, tag) \
case tag: \
{ \
GenTree##nm gt; \
res = *reinterpret_cast<VtablePtr*>(>); \
} \
break;
#define GTSTRUCT_2(nm, tag, tag2) \
case tag: \
case tag2: \
{ \
GenTree##nm gt; \
res = *reinterpret_cast<VtablePtr*>(>); \
} \
break;
#define GTSTRUCT_3(nm, tag, tag2, tag3) \
case tag: \
case tag2: \
case tag3: \
{ \
GenTree##nm gt; \
res = *reinterpret_cast<VtablePtr*>(>); \
} \
break;
#define GTSTRUCT_4(nm, tag, tag2, tag3, tag4) \
case tag: \
case tag2: \
case tag3: \
case tag4: \
{ \
GenTree##nm gt; \
res = *reinterpret_cast<VtablePtr*>(>); \
} \
break;
#define GTSTRUCT_N(nm, ...) /*handle explicitly*/
#define GTSTRUCT_2_SPECIAL(nm, tag, tag2) /*handle explicitly*/
#define GTSTRUCT_3_SPECIAL(nm, tag, tag2, tag3) /*handle explicitly*/
#include "gtstructs.h"
// clang-format on
// Handle the special cases.
// The following opers are in GTSTRUCT_N but no other place (namely, no subtypes).
case GT_STORE_BLK:
case GT_BLK:
{
GenTreeBlk gt;
res = *reinterpret_cast<VtablePtr*>(>);
}
break;
case GT_IND:
case GT_NULLCHECK:
{
GenTreeIndir gt;
res = *reinterpret_cast<VtablePtr*>(>);
}
break;
// We don't need to handle GTSTRUCT_N for LclVarCommon, since all those allowed opers are specified
// in their proper subtype. Similarly for GenTreeIndir.
default:
{
// Should be unary or binary op.
if (s_vtableForOp == nullptr)
{
unsigned opKind = OperKind(oper);
assert(!IsExOp(opKind));
assert(OperIsSimple(oper) || OperIsLeaf(oper));
// Need to provide non-null operands.
GenTreeIntCon dummyOp(TYP_INT, 0);
GenTreeOp gt(oper, TYP_INT, &dummyOp, ((opKind & GTK_UNOP) ? nullptr : &dummyOp));
s_vtableForOp = *reinterpret_cast<VtablePtr*>(>);
}
res = s_vtableForOp;
break;
}
}
s_vtablesForOpers[oper] = res;
return res;
}
void GenTree::SetVtableForOper(genTreeOps oper)
{
*reinterpret_cast<VtablePtr*>(this) = GetVtableForOper(oper);
}
#endif // DEBUGGABLE_GENTREE
GenTree* Compiler::gtNewOperNode(genTreeOps oper, var_types type, GenTree* op1, GenTree* op2)
{
assert(op1 != nullptr);
assert(op2 != nullptr);
// We should not be allocating nodes that extend GenTreeOp with this;
// should call the appropriate constructor for the extended type.
assert(!GenTree::IsExOp(GenTree::OperKind(oper)));
GenTree* node = new (this, oper) GenTreeOp(oper, type, op1, op2);
return node;
}
GenTreeColon* Compiler::gtNewColonNode(var_types type, GenTree* elseNode, GenTree* thenNode)
{
return new (this, GT_COLON) GenTreeColon(TYP_INT, elseNode, thenNode);
}
GenTreeQmark* Compiler::gtNewQmarkNode(var_types type, GenTree* cond, GenTreeColon* colon)
{
compQmarkUsed = true;
GenTreeQmark* result = new (this, GT_QMARK) GenTreeQmark(type, cond, colon);
#ifdef DEBUG
if (compQmarkRationalized)
{
fgCheckQmarkAllowedForm(result);
}
#endif
return result;
}
GenTreeIntCon* Compiler::gtNewIconNode(ssize_t value, var_types type)
{
return new (this, GT_CNS_INT) GenTreeIntCon(type, value);
}
GenTreeIntCon* Compiler::gtNewNull()
{
return gtNewIconNode(0, TYP_REF);
}
GenTreeIntCon* Compiler::gtNewTrue()
{
return gtNewIconNode(1, TYP_INT);
}
GenTreeIntCon* Compiler::gtNewFalse()
{
return gtNewIconNode(0, TYP_INT);
}
GenTreeIntCon* Compiler::gtNewIconNode(unsigned fieldOffset, FieldSeqNode* fieldSeq)
{
GenTreeIntCon* node = new (this, GT_CNS_INT) GenTreeIntCon(TYP_I_IMPL, static_cast<ssize_t>(fieldOffset));
node->gtFieldSeq = fieldSeq == nullptr ? FieldSeqStore::NotAField() : fieldSeq;
return node;
}
// return a new node representing the value in a physical register
GenTree* Compiler::gtNewPhysRegNode(regNumber reg, var_types type)
{
assert(genIsValidIntReg(reg) || (reg == REG_SPBASE));
GenTree* result = new (this, GT_PHYSREG) GenTreePhysReg(reg, type);
return result;
}
GenTree* Compiler::gtNewJmpTableNode()
{
return new (this, GT_JMPTABLE) GenTree(GT_JMPTABLE, TYP_I_IMPL);
}
/*****************************************************************************
*
* Converts an annotated token into icon flags (so that we will later be
* able to tell the type of the handle that will be embedded in the icon
* node)
*/
GenTreeFlags Compiler::gtTokenToIconFlags(unsigned token)
{
GenTreeFlags flags = GTF_EMPTY;
switch (TypeFromToken(token))
{
case mdtTypeRef:
case mdtTypeDef:
case mdtTypeSpec:
flags = GTF_ICON_CLASS_HDL;
break;
case mdtMethodDef:
flags = GTF_ICON_METHOD_HDL;
break;
case mdtFieldDef:
flags = GTF_ICON_FIELD_HDL;
break;
default:
flags = GTF_ICON_TOKEN_HDL;
break;
}
return flags;
}
//-----------------------------------------------------------------------------------------
// gtNewIndOfIconHandleNode: Creates an indirection GenTree node of a constant handle
//
// Arguments:
// indType - The type returned by the indirection node
// addr - The constant address to read from
// iconFlags - The GTF_ICON flag value that specifies the kind of handle that we have
// isInvariant - The indNode should also be marked as invariant
//
// Return Value:
// Returns a GT_IND node representing the value at the address provided by 'addr'
//
// Notes:
// The GT_IND node is marked as non-faulting
// If the indirection is not invariant, we also mark the indNode as GTF_GLOB_REF
//
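// The shape of the resulting tree is roughly IND<indType>(CNS_INT<addr>), with
// the IND marked GTF_IND_NONFAULTING and either GTF_IND_INVARIANT (when
// 'isInvariant') or GTF_GLOB_REF (otherwise).
//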
GenTree* Compiler::gtNewIndOfIconHandleNode(var_types indType, size_t addr, GenTreeFlags iconFlags, bool isInvariant)
{
GenTree* addrNode = gtNewIconHandleNode(addr, iconFlags);
GenTree* indNode = gtNewOperNode(GT_IND, indType, addrNode);
// This indirection won't cause an exception.
//
indNode->gtFlags |= GTF_IND_NONFAULTING;
if (isInvariant)
{
assert(iconFlags != GTF_ICON_STATIC_HDL); // Pointer to a mutable class Static variable
assert(iconFlags != GTF_ICON_BBC_PTR); // Pointer to a mutable basic block count value
assert(iconFlags != GTF_ICON_GLOBAL_PTR); // Pointer to mutable data from the VM state
// This indirection also is invariant.
indNode->gtFlags |= GTF_IND_INVARIANT;
if (iconFlags == GTF_ICON_STR_HDL)
{
// String literals are never null
indNode->gtFlags |= GTF_IND_NONNULL;
}
}
else
{
// GLOB_REF needs to be set for indirections returning values from mutable
// locations, so that e. g. args sorting does not reorder them with calls.
indNode->gtFlags |= GTF_GLOB_REF;
}
return indNode;
}
/*****************************************************************************
*
* Allocates an integer constant entry that represents a HANDLE to something.
* It may not be allowed to embed HANDLEs directly into the JITed code (e.g.
* as arguments to JIT helpers). Get a corresponding value that can be embedded.
* If the handle needs to be accessed via an indirection, pValue points to it.
*/
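// Sketch of the two resulting shapes: when 'value' is non-null the handle is a
// direct CNS_INT<value>; when only 'pValue' is available, the handle is read
// through IND(CNS_INT<pValue>), with the IND marked non-faulting and invariant.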
GenTree* Compiler::gtNewIconEmbHndNode(void* value, void* pValue, GenTreeFlags iconFlags, void* compileTimeHandle)
{
GenTree* iconNode;
GenTree* handleNode;
if (value != nullptr)
{
// When 'value' is non-null, pValue is required to be null
assert(pValue == nullptr);
// use 'value' to construct an integer constant node
iconNode = gtNewIconHandleNode((size_t)value, iconFlags);
// 'value' is the handle
handleNode = iconNode;
}
else
{
// When 'value' is null, pValue is required to be non-null
assert(pValue != nullptr);
// use 'pValue' to construct an integer constant node
iconNode = gtNewIconHandleNode((size_t)pValue, iconFlags);
// 'pValue' is an address of a location that contains the handle
// construct the indirection of 'pValue'
handleNode = gtNewOperNode(GT_IND, TYP_I_IMPL, iconNode);
// This indirection won't cause an exception.
handleNode->gtFlags |= GTF_IND_NONFAULTING;
// This indirection also is invariant.
handleNode->gtFlags |= GTF_IND_INVARIANT;
}
iconNode->AsIntCon()->gtCompileTimeHandle = (size_t)compileTimeHandle;
return handleNode;
}
/*****************************************************************************/
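// Sketch of the trees built below: IAT_VALUE produces a TYP_REF handle constant
// (a frozen string), IAT_PVALUE produces IND<ref>(CNS_INT<pValue>), and
// IAT_PPVALUE produces IND<ref>(IND<I_IMPL>(CNS_INT<pValue>)).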
GenTree* Compiler::gtNewStringLiteralNode(InfoAccessType iat, void* pValue)
{
GenTree* tree = nullptr;
switch (iat)
{
case IAT_VALUE:
setMethodHasFrozenString();
tree = gtNewIconEmbHndNode(pValue, nullptr, GTF_ICON_STR_HDL, nullptr);
tree->gtType = TYP_REF;
#ifdef DEBUG
tree->AsIntCon()->gtTargetHandle = (size_t)pValue;
#endif
break;
case IAT_PVALUE: // The value needs to be accessed via an indirection
// Create an indirection
tree = gtNewIndOfIconHandleNode(TYP_REF, (size_t)pValue, GTF_ICON_STR_HDL, true);
#ifdef DEBUG
tree->gtGetOp1()->AsIntCon()->gtTargetHandle = (size_t)pValue;
#endif
break;
case IAT_PPVALUE: // The value needs to be accessed via a double indirection
// Create the first indirection
tree = gtNewIndOfIconHandleNode(TYP_I_IMPL, (size_t)pValue, GTF_ICON_CONST_PTR, true);
#ifdef DEBUG
tree->gtGetOp1()->AsIntCon()->gtTargetHandle = (size_t)pValue;
#endif
// Create the second indirection
tree = gtNewOperNode(GT_IND, TYP_REF, tree);
// This indirection won't cause an exception.
tree->gtFlags |= GTF_IND_NONFAULTING;
// This indirection points into the global heap (it is a String object)
tree->gtFlags |= GTF_GLOB_REF;
break;
default:
noway_assert(!"Unexpected InfoAccessType");
}
return tree;
}
//------------------------------------------------------------------------
// gtNewStringLiteralLength: create GenTreeIntCon node for the given string
// literal to store its length.
//
// Arguments:
// node - string literal node.
//
// Return Value:
// GenTreeIntCon node with the string's length as its value, or null.
//
GenTreeIntCon* Compiler::gtNewStringLiteralLength(GenTreeStrCon* node)
{
if (node->IsStringEmptyField())
{
JITDUMP("Folded String.Empty.Length to 0\n");
return gtNewIconNode(0);
}
int length = -1;
const char16_t* str = info.compCompHnd->getStringLiteral(node->gtScpHnd, node->gtSconCPX, &length);
if (length >= 0)
{
GenTreeIntCon* iconNode = gtNewIconNode(length);
// str can be NULL for dynamic context
if (str != nullptr)
{
JITDUMP("Folded '\"%ws\".Length' to '%d'\n", str, length)
}
else
{
JITDUMP("Folded 'CNS_STR.Length' to '%d'\n", length)
}
return iconNode;
}
return nullptr;
}
/*****************************************************************************/
GenTree* Compiler::gtNewLconNode(__int64 value)
{
#ifdef TARGET_64BIT
GenTree* node = new (this, GT_CNS_INT) GenTreeIntCon(TYP_LONG, value);
#else
GenTree* node = new (this, GT_CNS_LNG) GenTreeLngCon(value);
#endif
return node;
}
GenTree* Compiler::gtNewDconNode(double value, var_types type)
{
GenTree* node = new (this, GT_CNS_DBL) GenTreeDblCon(value, type);
return node;
}
GenTree* Compiler::gtNewSconNode(int CPX, CORINFO_MODULE_HANDLE scpHandle)
{
// 'GT_CNS_STR' nodes later get transformed into 'GT_CALL'
assert(GenTree::s_gtNodeSizes[GT_CALL] > GenTree::s_gtNodeSizes[GT_CNS_STR]);
GenTree* node = new (this, GT_CALL) GenTreeStrCon(CPX, scpHandle DEBUGARG(/*largeNode*/ true));
return node;
}
GenTree* Compiler::gtNewZeroConNode(var_types type)
{
GenTree* zero;
switch (type)
{
case TYP_INT:
zero = gtNewIconNode(0);
break;
case TYP_BYREF:
FALLTHROUGH;
case TYP_REF:
zero = gtNewIconNode(0);
zero->gtType = type;
break;
case TYP_LONG:
zero = gtNewLconNode(0);
break;
case TYP_FLOAT:
zero = gtNewDconNode(0.0);
zero->gtType = type;
break;
case TYP_DOUBLE:
zero = gtNewDconNode(0.0);
break;
default:
noway_assert(!"Bad type in gtNewZeroConNode");
zero = nullptr;
break;
}
return zero;
}
GenTree* Compiler::gtNewOneConNode(var_types type)
{
GenTree* one;
switch (type)
{
case TYP_INT:
case TYP_UINT:
one = gtNewIconNode(1);
break;
case TYP_LONG:
case TYP_ULONG:
one = gtNewLconNode(1);
break;
case TYP_FLOAT:
case TYP_DOUBLE:
one = gtNewDconNode(1.0);
one->gtType = type;
break;
default:
noway_assert(!"Bad type in gtNewOneConNode");
one = nullptr;
break;
}
return one;
}
GenTreeLclVar* Compiler::gtNewStoreLclVar(unsigned dstLclNum, GenTree* src)
{
GenTreeLclVar* store = new (this, GT_STORE_LCL_VAR) GenTreeLclVar(GT_STORE_LCL_VAR, src->TypeGet(), dstLclNum);
store->gtOp1 = src;
store->gtFlags = (src->gtFlags & GTF_COMMON_MASK);
store->gtFlags |= GTF_VAR_DEF | GTF_ASG;
return store;
}
#ifdef FEATURE_SIMD
//---------------------------------------------------------------------
// gtNewSIMDVectorZero: create a GT_SIMD node for Vector<T>.Zero
//
// Arguments:
// simdType - simd vector type
// simdBaseJitType - element type of vector
// simdSize - size of vector in bytes
GenTree* Compiler::gtNewSIMDVectorZero(var_types simdType, CorInfoType simdBaseJitType, unsigned simdSize)
{
var_types simdBaseType = genActualType(JitType2PreciseVarType(simdBaseJitType));
GenTree* initVal = gtNewZeroConNode(simdBaseType);
initVal->gtType = simdBaseType;
return gtNewSIMDNode(simdType, initVal, SIMDIntrinsicInit, simdBaseJitType, simdSize);
}
#endif // FEATURE_SIMD
GenTreeCall* Compiler::gtNewIndCallNode(GenTree* addr, var_types type, GenTreeCall::Use* args, const DebugInfo& di)
{
return gtNewCallNode(CT_INDIRECT, (CORINFO_METHOD_HANDLE)addr, type, args, di);
}
GenTreeCall* Compiler::gtNewCallNode(
gtCallTypes callType, CORINFO_METHOD_HANDLE callHnd, var_types type, GenTreeCall::Use* args, const DebugInfo& di)
{
GenTreeCall* node = new (this, GT_CALL) GenTreeCall(genActualType(type));
node->gtFlags |= (GTF_CALL | GTF_GLOB_REF);
#ifdef UNIX_X86_ABI
if (callType == CT_INDIRECT || callType == CT_HELPER)
node->gtFlags |= GTF_CALL_POP_ARGS;
#endif // UNIX_X86_ABI
for (GenTreeCall::Use& use : GenTreeCall::UseList(args))
{
node->gtFlags |= (use.GetNode()->gtFlags & GTF_ALL_EFFECT);
}
node->gtCallType = callType;
node->gtCallMethHnd = callHnd;
node->gtCallArgs = args;
node->gtCallThisArg = nullptr;
node->fgArgInfo = nullptr;
INDEBUG(node->callSig = nullptr;)
node->tailCallInfo = nullptr;
node->gtRetClsHnd = nullptr;
node->gtControlExpr = nullptr;
node->gtCallMoreFlags = GTF_CALL_M_EMPTY;
if (callType == CT_INDIRECT)
{
node->gtCallCookie = nullptr;
}
else
{
node->gtInlineCandidateInfo = nullptr;
}
node->gtCallLateArgs = nullptr;
node->gtReturnType = type;
#ifdef FEATURE_READYTORUN
node->gtEntryPoint.addr = nullptr;
node->gtEntryPoint.accessType = IAT_VALUE;
#endif
#if defined(DEBUG) || defined(INLINE_DATA)
// These get updated after call node is built.
node->gtInlineObservation = InlineObservation::CALLEE_UNUSED_INITIAL;
node->gtRawILOffset = BAD_IL_OFFSET;
node->gtInlineContext = compInlineContext;
#endif
// Spec: Managed Retval sequence points need to be generated while generating debug info for debuggable code.
//
// Implementation note: if not generating MRV info genCallSite2ILOffsetMap will be NULL and
// codegen will pass DebugInfo() to emitter, which will cause emitter
// not to emit IP mapping entry.
if (opts.compDbgCode && opts.compDbgInfo && di.IsValid())
{
// Managed Retval - IL offset of the call. This offset is used to emit a
// CALL_INSTRUCTION type sequence point while emitting corresponding native call.
//
// TODO-Cleanup:
// a) (Opt) We need not store this offset if the method doesn't return a
// value. Rather it can be made BAD_IL_OFFSET to prevent a sequence
// point being emitted.
//
// b) (Opt) Add new sequence points only if requested by debugger through
// a new boundary type - ICorDebugInfo::BoundaryTypes
if (genCallSite2DebugInfoMap == nullptr)
{
genCallSite2DebugInfoMap = new (getAllocator()) CallSiteDebugInfoTable(getAllocator());
}
// Make sure that there are no duplicate entries for a given call node
assert(!genCallSite2DebugInfoMap->Lookup(node));
genCallSite2DebugInfoMap->Set(node, di);
}
// Initialize gtOtherRegs
node->ClearOtherRegs();
// Initialize spill flags of gtOtherRegs
node->ClearOtherRegFlags();
#if !defined(TARGET_64BIT)
if (varTypeIsLong(node))
{
assert(node->gtReturnType == node->gtType);
// Initialize Return type descriptor of call node
node->InitializeLongReturnType();
}
#endif // !defined(TARGET_64BIT)
return node;
}
GenTreeLclVar* Compiler::gtNewLclvNode(unsigned lnum, var_types type DEBUGARG(IL_OFFSET offs))
{
assert(type != TYP_VOID);
// We need to ensure that all struct values are normalized.
// It might be nice to assert this in general, but we have assignments of int to long.
if (varTypeIsStruct(type))
{
// Make an exception for implicit by-ref parameters during global morph, since
// their lvType has been updated to byref but their appearances have not yet all
// been rewritten and so may have struct type still.
LclVarDsc* varDsc = lvaGetDesc(lnum);
bool simd12ToSimd16Widening = false;
#if FEATURE_SIMD
// We can additionally have a SIMD12 that was widened to a SIMD16, generally as part of lowering
simd12ToSimd16Widening = (type == TYP_SIMD16) && (varDsc->lvType == TYP_SIMD12);
#endif
assert((type == varDsc->lvType) || simd12ToSimd16Widening ||
(lvaIsImplicitByRefLocal(lnum) && fgGlobalMorph && (varDsc->lvType == TYP_BYREF)));
}
GenTreeLclVar* node = new (this, GT_LCL_VAR) GenTreeLclVar(GT_LCL_VAR, type, lnum DEBUGARG(offs));
/* Cannot have this assert because the inliner uses this function
* to add temporaries */
// assert(lnum < lvaCount);
return node;
}
GenTreeLclVar* Compiler::gtNewLclLNode(unsigned lnum, var_types type DEBUGARG(IL_OFFSET offs))
{
// We need to ensure that all struct values are normalized.
// It might be nice to assert this in general, but we have assignments of int to long.
if (varTypeIsStruct(type))
{
// Make an exception for implicit by-ref parameters during global morph, since
// their lvType has been updated to byref but their appearances have not yet all
// been rewritten and so may have struct type still.
assert(type == lvaTable[lnum].lvType ||
(lvaIsImplicitByRefLocal(lnum) && fgGlobalMorph && (lvaTable[lnum].lvType == TYP_BYREF)));
}
// This local variable node may later get transformed into a large node
assert(GenTree::s_gtNodeSizes[LargeOpOpcode()] > GenTree::s_gtNodeSizes[GT_LCL_VAR]);
GenTreeLclVar* node =
new (this, LargeOpOpcode()) GenTreeLclVar(GT_LCL_VAR, type, lnum DEBUGARG(offs) DEBUGARG(/*largeNode*/ true));
return node;
}
GenTreeLclVar* Compiler::gtNewLclVarAddrNode(unsigned lclNum, var_types type)
{
GenTreeLclVar* node = new (this, GT_LCL_VAR_ADDR) GenTreeLclVar(GT_LCL_VAR_ADDR, type, lclNum);
return node;
}
GenTreeLclFld* Compiler::gtNewLclFldAddrNode(unsigned lclNum, unsigned lclOffs, FieldSeqNode* fieldSeq, var_types type)
{
GenTreeLclFld* node = new (this, GT_LCL_FLD_ADDR) GenTreeLclFld(GT_LCL_FLD_ADDR, type, lclNum, lclOffs);
node->SetFieldSeq(fieldSeq == nullptr ? FieldSeqStore::NotAField() : fieldSeq);
return node;
}
GenTreeLclFld* Compiler::gtNewLclFldNode(unsigned lnum, var_types type, unsigned offset)
{
GenTreeLclFld* node = new (this, GT_LCL_FLD) GenTreeLclFld(GT_LCL_FLD, type, lnum, offset);
/* Cannot have this assert because the inliner uses this function
* to add temporaries */
// assert(lnum < lvaCount);
node->SetFieldSeq(FieldSeqStore::NotAField());
return node;
}
GenTree* Compiler::gtNewInlineCandidateReturnExpr(GenTree* inlineCandidate, var_types type, BasicBlockFlags bbFlags)
{
assert(GenTree::s_gtNodeSizes[GT_RET_EXPR] == TREE_NODE_SZ_LARGE);
GenTreeRetExpr* node = new (this, GT_RET_EXPR) GenTreeRetExpr(type);
node->gtInlineCandidate = inlineCandidate;
node->bbFlags = bbFlags;
if (varTypeIsStruct(inlineCandidate) && !inlineCandidate->OperIsBlkOp())
{
node->gtRetClsHnd = gtGetStructHandle(inlineCandidate);
}
// GT_RET_EXPR node eventually might be bashed back to GT_CALL (when inlining is aborted for example).
// Therefore it should carry the GTF_CALL flag so that all the rules about spilling can apply to it as well.
// For example, impImportLeave or CEE_POP need to spill GT_RET_EXPR before emptying the evaluation stack.
node->gtFlags |= GTF_CALL;
return node;
}
GenTreeCall::Use* Compiler::gtPrependNewCallArg(GenTree* node, GenTreeCall::Use* args)
{
return new (this, CMK_ASTNode) GenTreeCall::Use(node, args);
}
GenTreeCall::Use* Compiler::gtInsertNewCallArgAfter(GenTree* node, GenTreeCall::Use* after)
{
after->SetNext(new (this, CMK_ASTNode) GenTreeCall::Use(node, after->GetNext()));
return after->GetNext();
}
GenTreeCall::Use* Compiler::gtNewCallArgs(GenTree* node)
{
return new (this, CMK_ASTNode) GenTreeCall::Use(node);
}
GenTreeCall::Use* Compiler::gtNewCallArgs(GenTree* node1, GenTree* node2)
{
return new (this, CMK_ASTNode) GenTreeCall::Use(node1, gtNewCallArgs(node2));
}
GenTreeCall::Use* Compiler::gtNewCallArgs(GenTree* node1, GenTree* node2, GenTree* node3)
{
return new (this, CMK_ASTNode) GenTreeCall::Use(node1, gtNewCallArgs(node2, node3));
}
GenTreeCall::Use* Compiler::gtNewCallArgs(GenTree* node1, GenTree* node2, GenTree* node3, GenTree* node4)
{
return new (this, CMK_ASTNode) GenTreeCall::Use(node1, gtNewCallArgs(node2, node3, node4));
}
/*****************************************************************************
*
* Given a GT_CALL node, access the fgArgInfo and find the entry
* that has the matching argNum and return the fgArgTableEntryPtr
*/
fgArgTabEntry* Compiler::gtArgEntryByArgNum(GenTreeCall* call, unsigned argNum)
{
fgArgInfo* argInfo = call->fgArgInfo;
noway_assert(argInfo != nullptr);
return argInfo->GetArgEntry(argNum);
}
/*****************************************************************************
*
* Given a GT_CALL node, access the fgArgInfo and find the entry
* that has the matching node and return the fgArgTableEntryPtr
*/
fgArgTabEntry* Compiler::gtArgEntryByNode(GenTreeCall* call, GenTree* node)
{
fgArgInfo* argInfo = call->fgArgInfo;
noway_assert(argInfo != nullptr);
unsigned argCount = argInfo->ArgCount();
fgArgTabEntry** argTable = argInfo->ArgTable();
fgArgTabEntry* curArgTabEntry = nullptr;
for (unsigned i = 0; i < argCount; i++)
{
curArgTabEntry = argTable[i];
if (curArgTabEntry->GetNode() == node)
{
return curArgTabEntry;
}
else if (curArgTabEntry->use->GetNode() == node)
{
return curArgTabEntry;
}
}
noway_assert(!"gtArgEntryByNode: node not found");
return nullptr;
}
/*****************************************************************************
*
* Find and return the entry with the given "lateArgInx". Requires that one is found
* (asserts this).
*/
fgArgTabEntry* Compiler::gtArgEntryByLateArgIndex(GenTreeCall* call, unsigned lateArgInx)
{
fgArgInfo* argInfo = call->fgArgInfo;
noway_assert(argInfo != nullptr);
assert(lateArgInx != UINT_MAX);
unsigned argCount = argInfo->ArgCount();
fgArgTabEntry** argTable = argInfo->ArgTable();
fgArgTabEntry* curArgTabEntry = nullptr;
for (unsigned i = 0; i < argCount; i++)
{
curArgTabEntry = argTable[i];
if (curArgTabEntry->isLateArg() && curArgTabEntry->GetLateArgInx() == lateArgInx)
{
return curArgTabEntry;
}
}
noway_assert(!"gtArgEntryByLateArgIndex: entry not found");
return nullptr;
}
//------------------------------------------------------------------------
// gtArgNodeByLateArgInx: Given a call instruction, find the argument with the given
// late arg index (i.e. the given position in the gtCallLateArgs list).
// Arguments:
// call - the call node
// lateArgInx - the index into the late args list
//
// Return value:
// The late argument node.
//
GenTree* Compiler::gtArgNodeByLateArgInx(GenTreeCall* call, unsigned lateArgInx)
{
GenTree* argx = nullptr;
unsigned regIndex = 0;
for (GenTreeCall::Use& use : call->LateArgs())
{
argx = use.GetNode();
assert(!argx->IsArgPlaceHolderNode()); // No placeholder nodes are in gtCallLateArgs;
if (regIndex == lateArgInx)
{
break;
}
regIndex++;
}
noway_assert(argx != nullptr);
return argx;
}
/*****************************************************************************
*
* Create a node that will assign 'src' to 'dst'.
*/
GenTreeOp* Compiler::gtNewAssignNode(GenTree* dst, GenTree* src)
{
assert(!src->TypeIs(TYP_VOID));
/* Mark the target as being assigned */
if ((dst->gtOper == GT_LCL_VAR) || (dst->OperGet() == GT_LCL_FLD))
{
dst->gtFlags |= GTF_VAR_DEF;
if (dst->IsPartialLclFld(this))
{
// We treat these partial writes as combined uses and defs.
dst->gtFlags |= GTF_VAR_USEASG;
}
}
dst->gtFlags |= GTF_DONT_CSE;
#if defined(FEATURE_SIMD) && !defined(TARGET_X86)
// TODO-CQ: x86 Windows supports multi-reg returns but not SIMD multi-reg returns
if (varTypeIsSIMD(dst->gtType))
{
// We want to track SIMD assignments as being intrinsics since they
// are functionally SIMD `mov` instructions and are more efficient
// when we don't promote, particularly when it occurs due to inlining
SetOpLclRelatedToSIMDIntrinsic(dst);
SetOpLclRelatedToSIMDIntrinsic(src);
}
#endif // FEATURE_SIMD
/* Create the assignment node */
GenTreeOp* asg = gtNewOperNode(GT_ASG, dst->TypeGet(), dst, src)->AsOp();
/* Mark the expression as containing an assignment */
asg->gtFlags |= GTF_ASG;
return asg;
}
//------------------------------------------------------------------------
// gtNewObjNode: Creates a new Obj node.
//
// Arguments:
// structHnd - The class handle of the struct type.
// addr - The address of the struct.
//
// Return Value:
// Returns a node representing the struct value at the given address.
//
GenTreeObj* Compiler::gtNewObjNode(CORINFO_CLASS_HANDLE structHnd, GenTree* addr)
{
var_types nodeType = impNormStructType(structHnd);
assert(varTypeIsStruct(nodeType));
GenTreeObj* objNode = new (this, GT_OBJ) GenTreeObj(nodeType, addr, typGetObjLayout(structHnd));
// An Obj is not a global reference if it is known to be a local struct.
if ((addr->gtFlags & GTF_GLOB_REF) == 0)
{
GenTreeLclVarCommon* lclNode = addr->IsLocalAddrExpr();
if (lclNode != nullptr)
{
objNode->gtFlags |= GTF_IND_NONFAULTING;
if (!lvaIsImplicitByRefLocal(lclNode->GetLclNum()))
{
objNode->gtFlags &= ~GTF_GLOB_REF;
}
}
}
return objNode;
}
//------------------------------------------------------------------------
// gtSetObjGcInfo: Set the GC info on an object node
//
// Arguments:
// objNode - The object node of interest
void Compiler::gtSetObjGcInfo(GenTreeObj* objNode)
{
assert(varTypeIsStruct(objNode->TypeGet()));
assert(objNode->TypeGet() == impNormStructType(objNode->GetLayout()->GetClassHandle()));
if (!objNode->GetLayout()->HasGCPtr())
{
objNode->SetOper(objNode->OperIs(GT_OBJ) ? GT_BLK : GT_STORE_BLK);
}
}
//------------------------------------------------------------------------
// gtNewStructVal: Return a node that represents a struct value
//
// Arguments:
// structHnd - The class for the struct
// addr - The address of the struct
//
// Return Value:
// A block, object or local node that represents the struct value pointed to by 'addr'.
GenTree* Compiler::gtNewStructVal(CORINFO_CLASS_HANDLE structHnd, GenTree* addr)
{
if (addr->gtOper == GT_ADDR)
{
GenTree* val = addr->gtGetOp1();
if (val->OperGet() == GT_LCL_VAR)
{
unsigned lclNum = addr->gtGetOp1()->AsLclVarCommon()->GetLclNum();
LclVarDsc* varDsc = &(lvaTable[lclNum]);
if (varTypeIsStruct(varDsc) && (varDsc->GetStructHnd() == structHnd) && !lvaIsImplicitByRefLocal(lclNum))
{
return addr->gtGetOp1();
}
}
}
return gtNewObjNode(structHnd, addr);
}
//------------------------------------------------------------------------
// gtNewBlockVal: Return a node that represents a possibly untyped block value
//
// Arguments:
// addr - The address of the block
// size - The size of the block
//
// Return Value:
// A block, object or local node that represents the block value pointed to by 'addr'.
GenTree* Compiler::gtNewBlockVal(GenTree* addr, unsigned size)
{
// By default we treat this as an opaque struct type with known size.
var_types blkType = TYP_STRUCT;
if (addr->gtOper == GT_ADDR)
{
GenTree* val = addr->gtGetOp1();
#if FEATURE_SIMD
if (varTypeIsSIMD(val) && (genTypeSize(val) == size))
{
blkType = val->TypeGet();
}
#endif // FEATURE_SIMD
if (varTypeIsStruct(val) && val->OperIs(GT_LCL_VAR))
{
LclVarDsc* varDsc = lvaGetDesc(val->AsLclVarCommon());
unsigned varSize = varTypeIsStruct(varDsc) ? varDsc->lvExactSize : genTypeSize(varDsc);
if (varSize == size)
{
return val;
}
}
}
return new (this, GT_BLK) GenTreeBlk(GT_BLK, blkType, addr, typGetBlkLayout(size));
}
// Creates a new assignment node for a CpObj.
// Parameters (exactly the same as MSIL CpObj):
//
// dstAddr - The target to copy the struct to
// srcAddr - The source to copy the struct from
// structHnd - A class token that represents the type of object being copied. May be null
// if FEATURE_SIMD is enabled and the source has a SIMD type.
// isVolatile - Is this marked as volatile memory?
GenTree* Compiler::gtNewCpObjNode(GenTree* dstAddr, GenTree* srcAddr, CORINFO_CLASS_HANDLE structHnd, bool isVolatile)
{
GenTree* lhs = gtNewStructVal(structHnd, dstAddr);
GenTree* src = nullptr;
if (lhs->OperIs(GT_OBJ))
{
GenTreeObj* lhsObj = lhs->AsObj();
#if DEBUG
// Codegen for CpObj assumes that we cannot have a struct with GC pointers whose size is not a multiple
// of the register size. The EE currently does not allow this to ensure that GC pointers are aligned
// if the struct is stored in an array. Note that this restriction doesn't apply to stack-allocated objects:
// they are never stored in arrays. We should never get to this method with stack-allocated objects since they
// are never copied so we don't need to exclude them from the assert below.
// Let's assert it just to be safe.
ClassLayout* layout = lhsObj->GetLayout();
unsigned size = layout->GetSize();
assert((layout->GetGCPtrCount() == 0) || (roundUp(size, REGSIZE_BYTES) == size));
#endif
gtSetObjGcInfo(lhsObj);
}
if (srcAddr->OperGet() == GT_ADDR)
{
src = srcAddr->AsOp()->gtOp1;
}
else
{
src = gtNewOperNode(GT_IND, lhs->TypeGet(), srcAddr);
}
GenTree* result = gtNewBlkOpNode(lhs, src, isVolatile, true);
return result;
}
//------------------------------------------------------------------------
// FixupInitBlkValue: Fixup the init value for an initBlk operation
//
// Arguments:
// asgType - The type of assignment that the initBlk is being transformed into
//
// Return Value:
// Modifies the constant value on this node to be the appropriate "fill"
// value for the initblk.
//
// Notes:
// The initBlk MSIL instruction takes a byte value, which must be
// extended to the size of the assignment when an initBlk is transformed
// to an assignment of a primitive type.
// This performs the appropriate extension.
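// For example, a fill byte of 0x3F becomes 0x3F3F3F3F for a TYP_INT
// assignment, and 0x3F3F3F3F3F3F3F3F for a TYP_LONG assignment on 64-bit
// targets.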
void GenTreeIntCon::FixupInitBlkValue(var_types asgType)
{
assert(varTypeIsIntegralOrI(asgType));
unsigned size = genTypeSize(asgType);
if (size > 1)
{
size_t cns = gtIconVal;
cns = cns & 0xFF;
cns |= cns << 8;
if (size >= 4)
{
cns |= cns << 16;
#ifdef TARGET_64BIT
if (size == 8)
{
cns |= cns << 32;
}
#endif // TARGET_64BIT
// Make the type match for evaluation types.
gtType = asgType;
// if we are initializing a GC type the value being assigned must be zero (null).
assert(!varTypeIsGC(asgType) || (cns == 0));
}
gtIconVal = cns;
}
}
//----------------------------------------------------------------------------
// UsesDivideByConstOptimized:
// returns true if rationalize will use the division by constant
// optimization for this node.
//
// Arguments:
// this - a GenTreeOp node
// comp - the compiler instance
//
// Return Value:
// Return true iff the node is a GT_DIV, GT_UDIV, GT_MOD, or GT_UMOD with
// an integer constant and we can perform the division operation using
// a reciprocal multiply or a shift operation.
//
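// For example, "x / 8" qualifies on all targets (power-of-two divisor), signed
// "x / 7" typically qualifies only where a reciprocal multiply is available
// (XARCH/ARM64 with optimizations enabled), and "x / 0" or signed "x / -1"
// never qualify since they must throw.
//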
bool GenTreeOp::UsesDivideByConstOptimized(Compiler* comp)
{
if (!comp->opts.OptimizationEnabled())
{
return false;
}
if (!OperIs(GT_DIV, GT_MOD, GT_UDIV, GT_UMOD))
{
return false;
}
#if defined(TARGET_ARM64)
if (OperIs(GT_MOD, GT_UMOD))
{
// MOD, UMOD not supported for ARM64
return false;
}
#endif // TARGET_ARM64
bool isSignedDivide = OperIs(GT_DIV, GT_MOD);
GenTree* dividend = gtGetOp1()->gtEffectiveVal(/*commaOnly*/ true);
GenTree* divisor = gtGetOp2()->gtEffectiveVal(/*commaOnly*/ true);
#if !defined(TARGET_64BIT)
if (dividend->OperIs(GT_LONG))
{
return false;
}
#endif
if (dividend->IsCnsIntOrI())
{
// We shouldn't see a divmod with constant operands here but if we do then it's likely
// because optimizations are disabled or it's a case that's supposed to throw an exception.
// Don't optimize this.
return false;
}
ssize_t divisorValue;
if (divisor->IsCnsIntOrI())
{
divisorValue = static_cast<ssize_t>(divisor->AsIntCon()->IconValue());
}
else
{
ValueNum vn = divisor->gtVNPair.GetLiberal();
if (comp->vnStore->IsVNConstant(vn))
{
divisorValue = comp->vnStore->CoercedConstantValue<ssize_t>(vn);
}
else
{
return false;
}
}
const var_types divType = TypeGet();
if (divisorValue == 0)
{
// x / 0 and x % 0 can't be optimized because they are required to throw an exception.
return false;
}
else if (isSignedDivide)
{
if (divisorValue == -1)
{
// x / -1 can't be optimized because INT_MIN / -1 is required to throw an exception.
return false;
}
else if (isPow2(divisorValue))
{
return true;
}
}
else // unsigned divide
{
if (divType == TYP_INT)
{
// Clear up the upper 32 bits of the value, they may be set to 1 because constants
// are treated as signed and stored in ssize_t which is 64 bit in size on 64 bit targets.
divisorValue &= UINT32_MAX;
}
size_t unsignedDivisorValue = (size_t)divisorValue;
if (isPow2(unsignedDivisorValue))
{
return true;
}
}
const bool isDiv = OperIs(GT_DIV, GT_UDIV);
if (isDiv)
{
if (isSignedDivide)
{
// If the divisor is the minimum representable integer value then the result is either 0 or 1
if ((divType == TYP_INT && divisorValue == INT_MIN) || (divType == TYP_LONG && divisorValue == INT64_MIN))
{
return true;
}
}
else
{
// If the divisor is greater than or equal to 2^(N - 1) then the result is either 0 or 1
if (((divType == TYP_INT) && ((UINT32)divisorValue > (UINT32_MAX / 2))) ||
((divType == TYP_LONG) && ((UINT64)divisorValue > (UINT64_MAX / 2))))
{
return true;
}
}
}
// TODO-ARM-CQ: Currently there's no GT_MULHI for ARM32
#if defined(TARGET_XARCH) || defined(TARGET_ARM64)
if (!comp->opts.MinOpts() && ((divisorValue >= 3) || !isSignedDivide))
{
// All checks pass we can perform the division operation using a reciprocal multiply.
return true;
}
#endif
return false;
}
//------------------------------------------------------------------------
// CheckDivideByConstOptimized:
// Checks if we can use the division by constant optimization
// on this node, and if so sets the flag GTF_DIV_BY_CNS_OPT and
// sets GTF_DONT_CSE on the constant node.
//
// Arguments:
// this - a GenTreeOp node
// comp - the compiler instance
//
void GenTreeOp::CheckDivideByConstOptimized(Compiler* comp)
{
if (UsesDivideByConstOptimized(comp))
{
gtFlags |= GTF_DIV_BY_CNS_OPT;
// Now set DONT_CSE on the GT_CNS_INT divisor, note that
// with ValueNumbering we can have a non GT_CNS_INT divisor
GenTree* divisor = gtGetOp2()->gtEffectiveVal(/*commaOnly*/ true);
if (divisor->OperIs(GT_CNS_INT))
{
divisor->gtFlags |= GTF_DONT_CSE;
}
}
}
//
//------------------------------------------------------------------------
// gtBlockOpInit: Initializes a BlkOp GenTree
//
// Arguments:
// result - an assignment node that is to be initialized.
// dst - the target (destination) we want to either initialize or copy to.
// src - the init value for InitBlk or the source struct for CpBlk/CpObj.
// isVolatile - specifies whether this node is a volatile memory operation.
//
// Assumptions:
// 'result' is an assignment that is newly constructed.
// If 'dst' is TYP_STRUCT, then it must be a block node or lclVar.
//
// Notes:
// This procedure centralizes all the logic to both enforce proper structure and
// to properly construct any InitBlk/CpBlk node.
void Compiler::gtBlockOpInit(GenTree* result, GenTree* dst, GenTree* srcOrFillVal, bool isVolatile)
{
if (!result->OperIsBlkOp())
{
assert(dst->TypeGet() != TYP_STRUCT);
return;
}
/* In the case of CpBlk, we want to avoid generating
* nodes where the source and destination are the same
* for two reasons: first, it is useless; second, it
* introduces issues in liveness, and copying memory from
* an overlapping location is undefined behavior both per
* the ECMA standard and per the memcpy semantics.
*
* NOTE: In this case we'll only detect the case for addr of a local
* and a local itself, any other complex expressions won't be
* caught.
*
* TODO-Cleanup: though having this logic is goodness (i.e. avoids self-assignment
* of struct vars very early), it was added because fgInterBlockLocalVarLiveness()
* isn't handling self-assignment of struct variables correctly. This issue may not
* surface if struct promotion is ON (which is the case on x86/arm). But still the
* fundamental issue exists that needs to be addressed.
*/
if (result->OperIsCopyBlkOp())
{
GenTree* currSrc = srcOrFillVal;
GenTree* currDst = dst;
if (currSrc->OperIsBlk() && (currSrc->AsBlk()->Addr()->OperGet() == GT_ADDR))
{
currSrc = currSrc->AsBlk()->Addr()->gtGetOp1();
}
if (currDst->OperIsBlk() && (currDst->AsBlk()->Addr()->OperGet() == GT_ADDR))
{
currDst = currDst->AsBlk()->Addr()->gtGetOp1();
}
if (currSrc->OperGet() == GT_LCL_VAR && currDst->OperGet() == GT_LCL_VAR &&
currSrc->AsLclVarCommon()->GetLclNum() == currDst->AsLclVarCommon()->GetLclNum())
{
// Make this a NOP
// TODO-Cleanup: probably doesn't matter, but could do this earlier and avoid creating a GT_ASG
result->gtBashToNOP();
return;
}
}
// Propagate all effect flags from children
result->gtFlags |= dst->gtFlags & GTF_ALL_EFFECT;
result->gtFlags |= result->AsOp()->gtOp2->gtFlags & GTF_ALL_EFFECT;
result->gtFlags |= (dst->gtFlags & GTF_EXCEPT) | (srcOrFillVal->gtFlags & GTF_EXCEPT);
if (isVolatile)
{
result->gtFlags |= GTF_BLK_VOLATILE;
}
#ifdef FEATURE_SIMD
if (result->OperIsCopyBlkOp() && varTypeIsSIMD(srcOrFillVal))
{
// If the source is a GT_SIMD node of SIMD type, then the dst lclvar struct
// should be labeled as simd intrinsic related struct.
// This is done so that the morpher can transform any field accesses into
// intrinsics, thus avoiding conflicting access methods (fields vs. whole-register).
GenTree* src = srcOrFillVal;
if (src->OperIsIndir() && (src->AsIndir()->Addr()->OperGet() == GT_ADDR))
{
src = src->AsIndir()->Addr()->gtGetOp1();
}
#ifdef FEATURE_HW_INTRINSICS
if ((src->OperGet() == GT_SIMD) || (src->OperGet() == GT_HWINTRINSIC))
#else
if (src->OperGet() == GT_SIMD)
#endif // FEATURE_HW_INTRINSICS
{
if (dst->OperIsBlk() && (dst->AsIndir()->Addr()->OperGet() == GT_ADDR))
{
dst = dst->AsIndir()->Addr()->gtGetOp1();
}
if (dst->OperIsLocal() && varTypeIsStruct(dst))
{
setLclRelatedToSIMDIntrinsic(dst);
}
}
}
#endif // FEATURE_SIMD
}
//------------------------------------------------------------------------
// gtNewBlkOpNode: Creates a GenTree for a block (struct) assignment.
//
// Arguments:
// dst - The destination node: local var / block node.
// srcOrFillVal - The value to assign for CopyBlk, the integer "fill" for InitBlk
// isVolatile - Whether this is a volatile memory operation or not.
// isCopyBlock - True if this is a block copy (rather than a block init).
//
// Return Value:
// Returns the newly constructed and initialized block operation.
//
GenTree* Compiler::gtNewBlkOpNode(GenTree* dst, GenTree* srcOrFillVal, bool isVolatile, bool isCopyBlock)
{
assert(dst->OperIsBlk() || dst->OperIsLocal());
if (isCopyBlock)
{
if (srcOrFillVal->OperIsIndir() && (srcOrFillVal->gtGetOp1()->gtOper == GT_ADDR))
{
srcOrFillVal = srcOrFillVal->gtGetOp1()->gtGetOp1();
}
}
else
{
// InitBlk
assert(varTypeIsIntegral(srcOrFillVal));
if (varTypeIsStruct(dst))
{
if (!srcOrFillVal->IsIntegralConst(0))
{
srcOrFillVal = gtNewOperNode(GT_INIT_VAL, TYP_INT, srcOrFillVal);
}
}
}
GenTree* result = gtNewAssignNode(dst, srcOrFillVal);
gtBlockOpInit(result, dst, srcOrFillVal, isVolatile);
return result;
}
//------------------------------------------------------------------------
// gtNewPutArgReg: Creates a new PutArgReg node.
//
// Arguments:
// type - The actual type of the argument
// arg - The argument node
// argReg - The register that the argument will be passed in
//
// Return Value:
// Returns the newly created PutArgReg node.
//
// Notes:
// The node is generated as GenTreeMultiRegOp on RyuJIT/armel, GenTreeOp on all the other archs.
//
GenTree* Compiler::gtNewPutArgReg(var_types type, GenTree* arg, regNumber argReg)
{
assert(arg != nullptr);
GenTree* node = nullptr;
#if defined(TARGET_ARM)
// A PUTARG_REG could be a MultiRegOp on arm since we could move a double register to two int registers.
node = new (this, GT_PUTARG_REG) GenTreeMultiRegOp(GT_PUTARG_REG, type, arg, nullptr);
if (type == TYP_LONG)
{
node->AsMultiRegOp()->gtOtherReg = REG_NEXT(argReg);
}
#else
node = gtNewOperNode(GT_PUTARG_REG, type, arg);
#endif
node->SetRegNum(argReg);
return node;
}
//------------------------------------------------------------------------
// gtNewBitCastNode: Creates a new BitCast node.
//
// Arguments:
// type - The actual type of the argument
// arg - The argument node
// argReg - The register that the argument will be passed in
//
// Return Value:
// Returns the newly created BitCast node.
//
// Notes:
// The node is generated as GenTreeMultiRegOp on RyuJIT/arm, as GenTreeOp on all the other archs.
//
GenTree* Compiler::gtNewBitCastNode(var_types type, GenTree* arg)
{
assert(arg != nullptr);
assert(type != TYP_STRUCT);
GenTree* node = nullptr;
#if defined(TARGET_ARM)
// A BITCAST could be a MultiRegOp on arm since we could move a double register to two int registers.
node = new (this, GT_BITCAST) GenTreeMultiRegOp(GT_BITCAST, type, arg, nullptr);
#else
node = gtNewOperNode(GT_BITCAST, type, arg);
#endif
return node;
}
//------------------------------------------------------------------------
// gtNewAllocObjNode: Helper to create an object allocation node.
//
// Arguments:
// pResolvedToken - Resolved token for the object being allocated
// useParent - true iff the token represents a child of the object's class
//
// Return Value:
// Returns GT_ALLOCOBJ node that will be later morphed into an
// allocation helper call or local variable allocation on the stack.
//
// Node creation can fail for inlinees when the type described by pResolvedToken
// can't be represented in jitted code. If this happens, this method will return
// nullptr.
//
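// Example (sketch): a caller importing a 'newobj' would typically do something like:
//    GenTreeAllocObj* allocObj = gtNewAllocObjNode(&resolvedToken, /* useParent */ true);
//    if (allocObj == nullptr)
//    {
//        // inlinee type could not be represented; the inline is being abandoned
//    }
//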
GenTreeAllocObj* Compiler::gtNewAllocObjNode(CORINFO_RESOLVED_TOKEN* pResolvedToken, bool useParent)
{
const bool mustRestoreHandle = true;
bool* const pRuntimeLookup = nullptr;
bool usingReadyToRunHelper = false;
CorInfoHelpFunc helper = CORINFO_HELP_UNDEF;
GenTree* opHandle = impTokenToHandle(pResolvedToken, pRuntimeLookup, mustRestoreHandle, useParent);
#ifdef FEATURE_READYTORUN
CORINFO_CONST_LOOKUP lookup = {};
if (opts.IsReadyToRun())
{
helper = CORINFO_HELP_READYTORUN_NEW;
CORINFO_LOOKUP_KIND* const pGenericLookupKind = nullptr;
usingReadyToRunHelper =
info.compCompHnd->getReadyToRunHelper(pResolvedToken, pGenericLookupKind, helper, &lookup);
}
#endif
if (!usingReadyToRunHelper)
{
if (opHandle == nullptr)
{
// We must be backing out of an inline.
assert(compDonotInline());
return nullptr;
}
}
bool helperHasSideEffects;
CorInfoHelpFunc helperTemp =
info.compCompHnd->getNewHelper(pResolvedToken, info.compMethodHnd, &helperHasSideEffects);
if (!usingReadyToRunHelper)
{
helper = helperTemp;
}
// TODO: ReadyToRun: When generic dictionary lookups are necessary, replace the lookup call
// and the newfast call with a single call to a dynamic R2R cell that will:
// 1) Load the context
// 2) Perform the generic dictionary lookup and caching, and generate the appropriate stub
// 3) Allocate and return the new object for boxing
// Reason: performance (today, we'll always use the slow helper for the R2R generics case)
GenTreeAllocObj* allocObj =
gtNewAllocObjNode(helper, helperHasSideEffects, pResolvedToken->hClass, TYP_REF, opHandle);
#ifdef FEATURE_READYTORUN
if (usingReadyToRunHelper)
{
assert(lookup.addr != nullptr);
allocObj->gtEntryPoint = lookup;
}
#endif
return allocObj;
}
/*****************************************************************************
*
* Clones the given tree value and returns a copy of the given tree.
* If 'complexOK' is false, the cloning is only done provided the tree
* is not too complex (whatever that may mean);
* If 'complexOK' is true, we try slightly harder to clone the tree.
* In either case, NULL is returned if the tree cannot be cloned
*
* Note that there is the function gtCloneExpr() which does a more
* complete job if you can't handle this function failing.
*/
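// Example (sketch): duplicating an address expression so it can feed both a load
// and a store without being re-evaluated:
//    GenTree* addrCopy = gtClone(addr, /* complexOK */ true);
//    if (addrCopy == nullptr)
//    {
//        addrCopy = gtCloneExpr(addr); // fall back to the full cloner
//    }
//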
GenTree* Compiler::gtClone(GenTree* tree, bool complexOK)
{
GenTree* copy;
switch (tree->gtOper)
{
case GT_CNS_INT:
#if defined(LATE_DISASM)
if (tree->IsIconHandle())
{
copy = gtNewIconHandleNode(tree->AsIntCon()->gtIconVal, tree->gtFlags, tree->AsIntCon()->gtFieldSeq);
copy->AsIntCon()->gtCompileTimeHandle = tree->AsIntCon()->gtCompileTimeHandle;
copy->gtType = tree->gtType;
}
else
#endif
{
copy = new (this, GT_CNS_INT)
GenTreeIntCon(tree->gtType, tree->AsIntCon()->gtIconVal, tree->AsIntCon()->gtFieldSeq);
copy->AsIntCon()->gtCompileTimeHandle = tree->AsIntCon()->gtCompileTimeHandle;
}
break;
case GT_CNS_LNG:
copy = gtNewLconNode(tree->AsLngCon()->gtLconVal);
break;
case GT_LCL_VAR:
// Remember that the LclVar node has been cloned. The flag will be set
// on 'copy' as well.
tree->gtFlags |= GTF_VAR_CLONED;
copy = gtNewLclvNode(tree->AsLclVarCommon()->GetLclNum(),
tree->gtType DEBUGARG(tree->AsLclVar()->gtLclILoffs));
break;
case GT_LCL_FLD:
case GT_LCL_FLD_ADDR:
// Remember that the LclVar node has been cloned. The flag will be set
// on 'copy' as well.
tree->gtFlags |= GTF_VAR_CLONED;
copy = new (this, tree->OperGet())
GenTreeLclFld(tree->OperGet(), tree->TypeGet(), tree->AsLclFld()->GetLclNum(),
tree->AsLclFld()->GetLclOffs());
copy->AsLclFld()->SetFieldSeq(tree->AsLclFld()->GetFieldSeq());
break;
case GT_CLS_VAR:
copy = new (this, GT_CLS_VAR)
GenTreeClsVar(tree->gtType, tree->AsClsVar()->gtClsVarHnd, tree->AsClsVar()->gtFieldSeq);
break;
default:
if (!complexOK)
{
return nullptr;
}
if (tree->gtOper == GT_FIELD)
{
GenTree* objp = nullptr;
if (tree->AsField()->GetFldObj() != nullptr)
{
objp = gtClone(tree->AsField()->GetFldObj(), false);
if (objp == nullptr)
{
return nullptr;
}
}
copy = gtNewFieldRef(tree->TypeGet(), tree->AsField()->gtFldHnd, objp, tree->AsField()->gtFldOffset);
copy->AsField()->gtFldMayOverlap = tree->AsField()->gtFldMayOverlap;
#ifdef FEATURE_READYTORUN
copy->AsField()->gtFieldLookup = tree->AsField()->gtFieldLookup;
#endif
}
else if (tree->OperIs(GT_ADD, GT_SUB))
{
GenTree* op1 = tree->AsOp()->gtOp1;
GenTree* op2 = tree->AsOp()->gtOp2;
if (op1->OperIsLeaf() && op2->OperIsLeaf())
{
op1 = gtClone(op1);
if (op1 == nullptr)
{
return nullptr;
}
op2 = gtClone(op2);
if (op2 == nullptr)
{
return nullptr;
}
copy = gtNewOperNode(tree->OperGet(), tree->TypeGet(), op1, op2);
}
else
{
return nullptr;
}
}
else if (tree->gtOper == GT_ADDR)
{
GenTree* op1 = gtClone(tree->AsOp()->gtOp1);
if (op1 == nullptr)
{
return nullptr;
}
copy = gtNewOperNode(GT_ADDR, tree->TypeGet(), op1);
}
else
{
return nullptr;
}
break;
}
copy->gtFlags |= tree->gtFlags & ~GTF_NODE_MASK;
#if defined(DEBUG)
copy->gtDebugFlags |= tree->gtDebugFlags & ~GTF_DEBUG_NODE_MASK;
#endif // defined(DEBUG)
return copy;
}
//------------------------------------------------------------------------
// gtCloneExpr: Create a copy of `tree`, adding flags `addFlags`, mapping
// local `varNum` to int constant `varVal` if it appears at
// the root, and mapping uses of local `deepVarNum` to constant
// `deepVarVal` if they occur beyond the root.
//
// Arguments:
// tree - GenTree to create a copy of
// addFlags - GTF_* flags to add to the copied tree nodes
// varNum - lclNum to replace at the root, or ~0 for no root replacement
// varVal - If replacing at root, replace local `varNum` with IntCns `varVal`
// deepVarNum - lclNum to replace uses of beyond the root, or ~0 for no replacement
// deepVarVal - If replacing beyond root, replace `deepVarNum` with IntCns `deepVarVal`
//
// Return Value:
// A copy of the given tree with the replacements and added flags specified.
//
// Notes:
// Top-level callers should generally call the overload that doesn't have
// the explicit `deepVarNum` and `deepVarVal` parameters; those are used in
// recursive invocations to avoid replacing defs.
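//
// Example (sketch, using the simpler public overload and an illustrative lclNum):
// clone a test expression while substituting local V02 with the constant 0 at
// the root:
//    GenTree* specialized = gtCloneExpr(condExpr, GTF_EMPTY, 2, 0);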
GenTree* Compiler::gtCloneExpr(
GenTree* tree, GenTreeFlags addFlags, unsigned varNum, int varVal, unsigned deepVarNum, int deepVarVal)
{
if (tree == nullptr)
{
return nullptr;
}
/* Figure out what kind of a node we have */
genTreeOps oper = tree->OperGet();
unsigned kind = tree->OperKind();
GenTree* copy;
/* Is this a leaf node? */
if (kind & GTK_LEAF)
{
switch (oper)
{
case GT_CNS_INT:
#if defined(LATE_DISASM)
if (tree->IsIconHandle())
{
copy =
gtNewIconHandleNode(tree->AsIntCon()->gtIconVal, tree->gtFlags, tree->AsIntCon()->gtFieldSeq);
copy->AsIntCon()->gtCompileTimeHandle = tree->AsIntCon()->gtCompileTimeHandle;
copy->gtType = tree->gtType;
}
else
#endif
{
copy = gtNewIconNode(tree->AsIntCon()->gtIconVal, tree->gtType);
#ifdef DEBUG
copy->AsIntCon()->gtTargetHandle = tree->AsIntCon()->gtTargetHandle;
#endif
copy->AsIntCon()->gtCompileTimeHandle = tree->AsIntCon()->gtCompileTimeHandle;
copy->AsIntCon()->gtFieldSeq = tree->AsIntCon()->gtFieldSeq;
}
goto DONE;
case GT_CNS_LNG:
copy = gtNewLconNode(tree->AsLngCon()->gtLconVal);
goto DONE;
case GT_CNS_DBL:
copy = gtNewDconNode(tree->AsDblCon()->gtDconVal);
copy->gtType = tree->gtType; // keep the same type
goto DONE;
case GT_CNS_STR:
copy = gtNewSconNode(tree->AsStrCon()->gtSconCPX, tree->AsStrCon()->gtScpHnd);
goto DONE;
case GT_LCL_VAR:
if (tree->AsLclVarCommon()->GetLclNum() == varNum)
{
copy = gtNewIconNode(varVal, tree->gtType);
if (tree->gtFlags & GTF_VAR_ARR_INDEX)
{
copy->LabelIndex(this);
}
}
else
{
// Remember that the LclVar node has been cloned. The flag will
// be set on 'copy' as well.
tree->gtFlags |= GTF_VAR_CLONED;
copy = gtNewLclvNode(tree->AsLclVar()->GetLclNum(),
tree->gtType DEBUGARG(tree->AsLclVar()->gtLclILoffs));
copy->AsLclVarCommon()->SetSsaNum(tree->AsLclVarCommon()->GetSsaNum());
}
goto DONE;
case GT_LCL_FLD:
if (tree->AsLclFld()->GetLclNum() == varNum)
{
IMPL_LIMITATION("replacing GT_LCL_FLD with a constant");
}
else
{
// Remember that the LclVar node has been cloned. The flag will
// be set on 'copy' as well.
tree->gtFlags |= GTF_VAR_CLONED;
copy =
new (this, GT_LCL_FLD) GenTreeLclFld(GT_LCL_FLD, tree->TypeGet(), tree->AsLclFld()->GetLclNum(),
tree->AsLclFld()->GetLclOffs());
copy->AsLclFld()->SetFieldSeq(tree->AsLclFld()->GetFieldSeq());
copy->gtFlags = tree->gtFlags;
}
goto DONE;
case GT_CLS_VAR:
copy = new (this, GT_CLS_VAR)
GenTreeClsVar(tree->TypeGet(), tree->AsClsVar()->gtClsVarHnd, tree->AsClsVar()->gtFieldSeq);
goto DONE;
case GT_RET_EXPR:
// GT_RET_EXPR is unique node, that contains a link to a gtInlineCandidate node,
// that is part of another statement. We cannot clone both here and cannot
// create another GT_RET_EXPR that points to the same gtInlineCandidate.
NO_WAY("Cloning of GT_RET_EXPR node not supported");
goto DONE;
case GT_MEMORYBARRIER:
copy = new (this, GT_MEMORYBARRIER) GenTree(GT_MEMORYBARRIER, TYP_VOID);
goto DONE;
case GT_ARGPLACE:
copy = gtNewArgPlaceHolderNode(tree->gtType, tree->AsArgPlace()->gtArgPlaceClsHnd);
goto DONE;
case GT_FTN_ADDR:
copy = new (this, oper) GenTreeFptrVal(tree->gtType, tree->AsFptrVal()->gtFptrMethod);
#ifdef FEATURE_READYTORUN
copy->AsFptrVal()->gtEntryPoint = tree->AsFptrVal()->gtEntryPoint;
#endif
goto DONE;
case GT_CATCH_ARG:
case GT_NO_OP:
case GT_LABEL:
copy = new (this, oper) GenTree(oper, tree->gtType);
goto DONE;
#if !defined(FEATURE_EH_FUNCLETS)
case GT_END_LFIN:
#endif // !FEATURE_EH_FUNCLETS
case GT_JMP:
copy = new (this, oper) GenTreeVal(oper, tree->gtType, tree->AsVal()->gtVal1);
goto DONE;
case GT_LCL_VAR_ADDR:
copy = new (this, oper) GenTreeLclVar(oper, tree->TypeGet(), tree->AsLclVar()->GetLclNum());
goto DONE;
case GT_LCL_FLD_ADDR:
copy = new (this, oper)
GenTreeLclFld(oper, tree->TypeGet(), tree->AsLclFld()->GetLclNum(), tree->AsLclFld()->GetLclOffs());
copy->AsLclFld()->SetFieldSeq(tree->AsLclFld()->GetFieldSeq());
goto DONE;
default:
NO_WAY("Cloning of node not supported");
goto DONE;
}
}
/* Is it a 'simple' unary/binary operator? */
if (kind & GTK_SMPOP)
{
/* If necessary, make sure we allocate a "fat" tree node */
CLANG_FORMAT_COMMENT_ANCHOR;
switch (oper)
{
/* These nodes sometimes get bashed to "fat" ones */
case GT_MUL:
case GT_DIV:
case GT_MOD:
case GT_UDIV:
case GT_UMOD:
// In the implementation of gtNewLargeOperNode you have
// to give an oper that will create a small node,
// otherwise it asserts.
//
if (GenTree::s_gtNodeSizes[oper] == TREE_NODE_SZ_SMALL)
{
copy = gtNewLargeOperNode(oper, tree->TypeGet(), tree->AsOp()->gtOp1,
tree->OperIsBinary() ? tree->AsOp()->gtOp2 : nullptr);
}
else // Always a large tree
{
if (tree->OperIsBinary())
{
copy = gtNewOperNode(oper, tree->TypeGet(), tree->AsOp()->gtOp1, tree->AsOp()->gtOp2);
}
else
{
copy = gtNewOperNode(oper, tree->TypeGet(), tree->AsOp()->gtOp1);
}
}
break;
case GT_CAST:
copy = new (this, LargeOpOpcode())
GenTreeCast(tree->TypeGet(), tree->AsCast()->CastOp(), tree->IsUnsigned(),
tree->AsCast()->gtCastType DEBUGARG(/*largeNode*/ TRUE));
break;
case GT_INDEX:
{
GenTreeIndex* asInd = tree->AsIndex();
copy = new (this, GT_INDEX)
GenTreeIndex(asInd->TypeGet(), asInd->Arr(), asInd->Index(), asInd->gtIndElemSize);
copy->AsIndex()->gtStructElemClass = asInd->gtStructElemClass;
}
break;
case GT_INDEX_ADDR:
{
GenTreeIndexAddr* asIndAddr = tree->AsIndexAddr();
copy = new (this, GT_INDEX_ADDR)
GenTreeIndexAddr(asIndAddr->Arr(), asIndAddr->Index(), asIndAddr->gtElemType,
asIndAddr->gtStructElemClass, asIndAddr->gtElemSize, asIndAddr->gtLenOffset,
asIndAddr->gtElemOffset);
copy->AsIndexAddr()->gtIndRngFailBB = asIndAddr->gtIndRngFailBB;
}
break;
case GT_ALLOCOBJ:
{
GenTreeAllocObj* asAllocObj = tree->AsAllocObj();
copy = new (this, GT_ALLOCOBJ)
GenTreeAllocObj(tree->TypeGet(), asAllocObj->gtNewHelper, asAllocObj->gtHelperHasSideEffects,
asAllocObj->gtAllocObjClsHnd, asAllocObj->gtOp1);
#ifdef FEATURE_READYTORUN
copy->AsAllocObj()->gtEntryPoint = asAllocObj->gtEntryPoint;
#endif
}
break;
case GT_RUNTIMELOOKUP:
{
GenTreeRuntimeLookup* asRuntimeLookup = tree->AsRuntimeLookup();
copy = new (this, GT_RUNTIMELOOKUP)
GenTreeRuntimeLookup(asRuntimeLookup->gtHnd, asRuntimeLookup->gtHndType, asRuntimeLookup->gtOp1);
}
break;
case GT_ARR_LENGTH:
copy = gtNewArrLen(tree->TypeGet(), tree->AsOp()->gtOp1, tree->AsArrLen()->ArrLenOffset(), nullptr);
break;
case GT_ARR_INDEX:
copy = new (this, GT_ARR_INDEX)
GenTreeArrIndex(tree->TypeGet(),
gtCloneExpr(tree->AsArrIndex()->ArrObj(), addFlags, deepVarNum, deepVarVal),
gtCloneExpr(tree->AsArrIndex()->IndexExpr(), addFlags, deepVarNum, deepVarVal),
tree->AsArrIndex()->gtCurrDim, tree->AsArrIndex()->gtArrRank,
tree->AsArrIndex()->gtArrElemType);
break;
case GT_QMARK:
copy = new (this, GT_QMARK)
GenTreeQmark(tree->TypeGet(), tree->AsOp()->gtGetOp1(), tree->AsOp()->gtGetOp2()->AsColon());
break;
case GT_OBJ:
copy =
new (this, GT_OBJ) GenTreeObj(tree->TypeGet(), tree->AsObj()->Addr(), tree->AsObj()->GetLayout());
break;
case GT_BLK:
copy = new (this, GT_BLK)
GenTreeBlk(GT_BLK, tree->TypeGet(), tree->AsBlk()->Addr(), tree->AsBlk()->GetLayout());
break;
case GT_FIELD:
copy = new (this, GT_FIELD) GenTreeField(tree->TypeGet(), tree->AsField()->GetFldObj(),
tree->AsField()->gtFldHnd, tree->AsField()->gtFldOffset);
copy->AsField()->gtFldMayOverlap = tree->AsField()->gtFldMayOverlap;
#ifdef FEATURE_READYTORUN
copy->AsField()->gtFieldLookup = tree->AsField()->gtFieldLookup;
#endif
break;
case GT_BOX:
copy = new (this, GT_BOX)
GenTreeBox(tree->TypeGet(), tree->AsOp()->gtOp1, tree->AsBox()->gtAsgStmtWhenInlinedBoxValue,
tree->AsBox()->gtCopyStmtWhenInlinedBoxValue);
break;
case GT_INTRINSIC:
copy = new (this, GT_INTRINSIC)
GenTreeIntrinsic(tree->TypeGet(), tree->AsOp()->gtOp1, tree->AsOp()->gtOp2,
tree->AsIntrinsic()->gtIntrinsicName, tree->AsIntrinsic()->gtMethodHandle);
#ifdef FEATURE_READYTORUN
copy->AsIntrinsic()->gtEntryPoint = tree->AsIntrinsic()->gtEntryPoint;
#endif
break;
case GT_BOUNDS_CHECK:
copy = new (this, GT_BOUNDS_CHECK)
GenTreeBoundsChk(tree->AsBoundsChk()->GetIndex(), tree->AsBoundsChk()->GetArrayLength(),
tree->AsBoundsChk()->gtThrowKind);
copy->AsBoundsChk()->gtIndRngFailBB = tree->AsBoundsChk()->gtIndRngFailBB;
break;
case GT_LEA:
{
GenTreeAddrMode* addrModeOp = tree->AsAddrMode();
copy = new (this, GT_LEA)
GenTreeAddrMode(addrModeOp->TypeGet(), addrModeOp->Base(), addrModeOp->Index(), addrModeOp->gtScale,
static_cast<unsigned>(addrModeOp->Offset()));
}
break;
case GT_COPY:
case GT_RELOAD:
{
copy = new (this, oper) GenTreeCopyOrReload(oper, tree->TypeGet(), tree->gtGetOp1());
}
break;
default:
assert(!GenTree::IsExOp(tree->OperKind()) && tree->OperIsSimple());
// We're in the SimpleOp case, so it's always unary or binary.
if (GenTree::OperIsUnary(tree->OperGet()))
{
copy = gtNewOperNode(oper, tree->TypeGet(), tree->AsOp()->gtOp1, /*doSimplifications*/ false);
}
else
{
assert(GenTree::OperIsBinary(tree->OperGet()));
copy = gtNewOperNode(oper, tree->TypeGet(), tree->AsOp()->gtOp1, tree->AsOp()->gtOp2);
}
break;
}
// Some flags are conceptually part of the gtOper, and should be copied immediately.
if (tree->gtOverflowEx())
{
copy->gtFlags |= GTF_OVERFLOW;
}
if (tree->AsOp()->gtOp1)
{
if (tree->gtOper == GT_ASG)
{
// Don't replace varNum if it appears as the LHS of an assign.
copy->AsOp()->gtOp1 = gtCloneExpr(tree->AsOp()->gtOp1, addFlags, -1, 0, deepVarNum, deepVarVal);
}
else
{
copy->AsOp()->gtOp1 = gtCloneExpr(tree->AsOp()->gtOp1, addFlags, deepVarNum, deepVarVal);
}
}
if (tree->gtGetOp2IfPresent())
{
copy->AsOp()->gtOp2 = gtCloneExpr(tree->AsOp()->gtOp2, addFlags, deepVarNum, deepVarVal);
}
/* Flags */
addFlags |= tree->gtFlags;
// Copy any node annotations, if necessary.
switch (tree->gtOper)
{
case GT_STOREIND:
case GT_IND:
case GT_OBJ:
case GT_STORE_OBJ:
{
ArrayInfo arrInfo;
if (!tree->AsIndir()->gtOp1->OperIs(GT_INDEX_ADDR) && TryGetArrayInfo(tree->AsIndir(), &arrInfo))
{
GetArrayInfoMap()->Set(copy, arrInfo);
}
}
break;
default:
break;
}
#ifdef DEBUG
/* GTF_NODE_MASK should not be propagated from 'tree' to 'copy' */
addFlags &= ~GTF_NODE_MASK;
#endif
// Effects flags propagate upwards.
if (copy->AsOp()->gtOp1 != nullptr)
{
copy->gtFlags |= (copy->AsOp()->gtOp1->gtFlags & GTF_ALL_EFFECT);
}
if (copy->gtGetOp2IfPresent() != nullptr)
{
copy->gtFlags |= (copy->gtGetOp2()->gtFlags & GTF_ALL_EFFECT);
}
goto DONE;
}
/* See what kind of a special operator we have here */
switch (oper)
{
case GT_CALL:
// We can't safely clone calls that have GT_RET_EXPRs via gtCloneExpr.
// You must use gtCloneCandidateCall for these calls (and then do appropriate other fixup)
if (tree->AsCall()->IsInlineCandidate() || tree->AsCall()->IsGuardedDevirtualizationCandidate())
{
NO_WAY("Cloning of calls with associated GT_RET_EXPR nodes is not supported");
}
copy = gtCloneExprCallHelper(tree->AsCall(), addFlags, deepVarNum, deepVarVal);
break;
#ifdef FEATURE_SIMD
case GT_SIMD:
copy = new (this, GT_SIMD)
GenTreeSIMD(tree->TypeGet(), IntrinsicNodeBuilder(getAllocator(CMK_ASTNode), tree->AsSIMD()),
tree->AsSIMD()->GetSIMDIntrinsicId(), tree->AsSIMD()->GetSimdBaseJitType(),
tree->AsSIMD()->GetSimdSize());
goto CLONE_MULTIOP_OPERANDS;
#endif
#ifdef FEATURE_HW_INTRINSICS
case GT_HWINTRINSIC:
copy = new (this, GT_HWINTRINSIC)
GenTreeHWIntrinsic(tree->TypeGet(), IntrinsicNodeBuilder(getAllocator(CMK_ASTNode), tree->AsMultiOp()),
tree->AsHWIntrinsic()->GetHWIntrinsicId(),
tree->AsHWIntrinsic()->GetSimdBaseJitType(), tree->AsHWIntrinsic()->GetSimdSize(),
tree->AsHWIntrinsic()->IsSimdAsHWIntrinsic());
copy->AsHWIntrinsic()->SetAuxiliaryJitType(tree->AsHWIntrinsic()->GetAuxiliaryJitType());
goto CLONE_MULTIOP_OPERANDS;
#endif
#if defined(FEATURE_SIMD) || defined(FEATURE_HW_INTRINSICS)
CLONE_MULTIOP_OPERANDS:
for (GenTree** use : copy->AsMultiOp()->UseEdges())
{
*use = gtCloneExpr(*use, addFlags, deepVarNum, deepVarVal);
}
break;
#endif
case GT_ARR_ELEM:
{
GenTreeArrElem* arrElem = tree->AsArrElem();
GenTree* inds[GT_ARR_MAX_RANK];
for (unsigned dim = 0; dim < arrElem->gtArrRank; dim++)
{
inds[dim] = gtCloneExpr(arrElem->gtArrInds[dim], addFlags, deepVarNum, deepVarVal);
}
copy = new (this, GT_ARR_ELEM)
GenTreeArrElem(arrElem->TypeGet(), gtCloneExpr(arrElem->gtArrObj, addFlags, deepVarNum, deepVarVal),
arrElem->gtArrRank, arrElem->gtArrElemSize, arrElem->gtArrElemType, &inds[0]);
}
break;
case GT_ARR_OFFSET:
{
copy = new (this, GT_ARR_OFFSET)
GenTreeArrOffs(tree->TypeGet(),
gtCloneExpr(tree->AsArrOffs()->gtOffset, addFlags, deepVarNum, deepVarVal),
gtCloneExpr(tree->AsArrOffs()->gtIndex, addFlags, deepVarNum, deepVarVal),
gtCloneExpr(tree->AsArrOffs()->gtArrObj, addFlags, deepVarNum, deepVarVal),
tree->AsArrOffs()->gtCurrDim, tree->AsArrOffs()->gtArrRank,
tree->AsArrOffs()->gtArrElemType);
}
break;
case GT_PHI:
{
copy = new (this, GT_PHI) GenTreePhi(tree->TypeGet());
GenTreePhi::Use** prevUse = ©->AsPhi()->gtUses;
for (GenTreePhi::Use& use : tree->AsPhi()->Uses())
{
*prevUse = new (this, CMK_ASTNode)
GenTreePhi::Use(gtCloneExpr(use.GetNode(), addFlags, deepVarNum, deepVarVal), *prevUse);
prevUse = &((*prevUse)->NextRef());
}
}
break;
case GT_FIELD_LIST:
copy = new (this, GT_FIELD_LIST) GenTreeFieldList();
for (GenTreeFieldList::Use& use : tree->AsFieldList()->Uses())
{
copy->AsFieldList()->AddField(this, gtCloneExpr(use.GetNode(), addFlags, deepVarNum, deepVarVal),
use.GetOffset(), use.GetType());
}
break;
case GT_CMPXCHG:
copy = new (this, GT_CMPXCHG)
GenTreeCmpXchg(tree->TypeGet(),
gtCloneExpr(tree->AsCmpXchg()->gtOpLocation, addFlags, deepVarNum, deepVarVal),
gtCloneExpr(tree->AsCmpXchg()->gtOpValue, addFlags, deepVarNum, deepVarVal),
gtCloneExpr(tree->AsCmpXchg()->gtOpComparand, addFlags, deepVarNum, deepVarVal));
break;
case GT_STORE_DYN_BLK:
copy = new (this, oper)
GenTreeStoreDynBlk(gtCloneExpr(tree->AsStoreDynBlk()->Addr(), addFlags, deepVarNum, deepVarVal),
gtCloneExpr(tree->AsStoreDynBlk()->Data(), addFlags, deepVarNum, deepVarVal),
gtCloneExpr(tree->AsStoreDynBlk()->gtDynamicSize, addFlags, deepVarNum, deepVarVal));
break;
default:
#ifdef DEBUG
gtDispTree(tree);
#endif
NO_WAY("unexpected operator");
}
DONE:
// If it has a zero-offset field seq, copy annotation.
if (tree->TypeGet() == TYP_BYREF)
{
FieldSeqNode* fldSeq = nullptr;
if (GetZeroOffsetFieldMap()->Lookup(tree, &fldSeq))
{
fgAddFieldSeqForZeroOffset(copy, fldSeq);
}
}
copy->gtVNPair = tree->gtVNPair; // A cloned tree gets the original's Value number pair
/* Compute the flags for the copied node. Note that we can do this only
if we didn't gtFoldExpr(copy) */
if (copy->gtOper == oper)
{
addFlags |= tree->gtFlags;
#ifdef DEBUG
/* GTF_NODE_MASK should not be propagated from 'tree' to 'copy' */
addFlags &= ~GTF_NODE_MASK;
#endif
copy->gtFlags |= addFlags;
// Update side effect flags since they may be different from the source side effect flags.
// For example, we may have replaced some locals with constants and made indirections non-throwing.
gtUpdateNodeSideEffects(copy);
}
/* GTF_COLON_COND should be propagated from 'tree' to 'copy' */
copy->gtFlags |= (tree->gtFlags & GTF_COLON_COND);
#if defined(DEBUG)
// Non-node debug flags should be propagated from 'tree' to 'copy'
copy->gtDebugFlags |= (tree->gtDebugFlags & ~GTF_DEBUG_NODE_MASK);
#endif
/* Make sure to copy back fields that may have been initialized */
copy->CopyRawCosts(tree);
copy->gtRsvdRegs = tree->gtRsvdRegs;
copy->CopyReg(tree);
return copy;
}
//------------------------------------------------------------------------
// gtCloneExprCallHelper: clone a call tree
//
// Notes:
// Do not invoke this method directly, instead call either gtCloneExpr
// or gtCloneCandidateCall, as appropriate.
//
// Arguments:
// tree - the call to clone
// addFlags - GTF_* flags to add to the copied tree nodes
// deepVarNum - lclNum to replace uses of beyond the root, or BAD_VAR_NUM for no replacement
// deepVarVal - If replacing beyond root, replace `deepVarNum` with IntCns `deepVarVal`
//
// Returns:
// Cloned copy of call and all subtrees.
GenTreeCall* Compiler::gtCloneExprCallHelper(GenTreeCall* tree,
GenTreeFlags addFlags,
unsigned deepVarNum,
int deepVarVal)
{
GenTreeCall* copy = new (this, GT_CALL) GenTreeCall(tree->TypeGet());
if (tree->gtCallThisArg == nullptr)
{
copy->gtCallThisArg = nullptr;
}
else
{
copy->gtCallThisArg =
gtNewCallArgs(gtCloneExpr(tree->gtCallThisArg->GetNode(), addFlags, deepVarNum, deepVarVal));
}
copy->gtCallMoreFlags = tree->gtCallMoreFlags;
copy->gtCallArgs = nullptr;
copy->gtCallLateArgs = nullptr;
GenTreeCall::Use** argsTail = ©->gtCallArgs;
for (GenTreeCall::Use& use : tree->Args())
{
*argsTail = gtNewCallArgs(gtCloneExpr(use.GetNode(), addFlags, deepVarNum, deepVarVal));
argsTail = &((*argsTail)->NextRef());
}
argsTail = ©->gtCallLateArgs;
for (GenTreeCall::Use& use : tree->LateArgs())
{
*argsTail = gtNewCallArgs(gtCloneExpr(use.GetNode(), addFlags, deepVarNum, deepVarVal));
argsTail = &((*argsTail)->NextRef());
}
// The call sig comes from the EE and doesn't change throughout the compilation process, meaning
// we only really need one physical copy of it. Therefore a shallow pointer copy will suffice.
// (Note that this still holds even if the tree we are cloning was created by an inlinee compiler,
// because the inlinee still uses the inliner's memory allocator anyway.)
INDEBUG(copy->callSig = tree->callSig;)
// The tail call info does not change after it is allocated, so for the same reasons as above
// a shallow copy suffices.
copy->tailCallInfo = tree->tailCallInfo;
copy->gtRetClsHnd = tree->gtRetClsHnd;
copy->gtControlExpr = gtCloneExpr(tree->gtControlExpr, addFlags, deepVarNum, deepVarVal);
copy->gtStubCallStubAddr = tree->gtStubCallStubAddr;
/* Copy the union */
if (tree->gtCallType == CT_INDIRECT)
{
copy->gtCallCookie =
tree->gtCallCookie ? gtCloneExpr(tree->gtCallCookie, addFlags, deepVarNum, deepVarVal) : nullptr;
copy->gtCallAddr = tree->gtCallAddr ? gtCloneExpr(tree->gtCallAddr, addFlags, deepVarNum, deepVarVal) : nullptr;
}
else
{
copy->gtCallMethHnd = tree->gtCallMethHnd;
copy->gtInlineCandidateInfo = tree->gtInlineCandidateInfo;
}
copy->gtCallType = tree->gtCallType;
copy->gtReturnType = tree->gtReturnType;
if (tree->fgArgInfo)
{
// Create and initialize the fgArgInfo for our copy of the call tree
copy->fgArgInfo = new (this, CMK_Unknown) fgArgInfo(copy, tree);
}
else
{
copy->fgArgInfo = nullptr;
}
#if FEATURE_MULTIREG_RET
copy->gtReturnTypeDesc = tree->gtReturnTypeDesc;
#endif
#ifdef FEATURE_READYTORUN
copy->setEntryPoint(tree->gtEntryPoint);
#endif
#if defined(DEBUG) || defined(INLINE_DATA)
copy->gtInlineObservation = tree->gtInlineObservation;
copy->gtRawILOffset = tree->gtRawILOffset;
copy->gtInlineContext = tree->gtInlineContext;
#endif
copy->CopyOtherRegFlags(tree);
// We keep track of the number of no return calls, so if we've cloned
// one of these, update the tracking.
//
if (tree->IsNoReturn())
{
assert(copy->IsNoReturn());
setMethodHasNoReturnCalls();
}
return copy;
}
//------------------------------------------------------------------------
// gtCloneCandidateCall: clone a call that is an inline or guarded
// devirtualization candidate (~ any call that can have a GT_RET_EXPR)
//
// Notes:
// If the call really is a candidate, the caller must take additional steps
// after cloning to re-establish candidate info and the relationship between
// the candidate and any associated GT_RET_EXPR.
//
// Arguments:
// call - the call to clone
//
// Returns:
// Cloned copy of call and all subtrees.
GenTreeCall* Compiler::gtCloneCandidateCall(GenTreeCall* call)
{
assert(call->IsInlineCandidate() || call->IsGuardedDevirtualizationCandidate());
GenTreeCall* result = gtCloneExprCallHelper(call);
// There is some common post-processing in gtCloneExpr that we reproduce
// here, for the fields that make sense for candidate calls.
result->gtFlags |= call->gtFlags;
#if defined(DEBUG)
result->gtDebugFlags |= (call->gtDebugFlags & ~GTF_DEBUG_NODE_MASK);
#endif
result->CopyReg(call);
return result;
}
//------------------------------------------------------------------------
// gtUpdateSideEffects: Update the side effects of a tree and its ancestors
//
// Arguments:
// stmt - The tree's statement
// tree - Tree to update the side effects for
//
// Note: If the tree's order hasn't been established, the method updates side effect
// flags on all of the statement's nodes.
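//
// Example (sketch): after replacing a subtree during morph, refresh the stale
// flags on the modified node and its ancestors (or on the whole statement when
// the nodes are not yet threaded):
//    gtUpdateSideEffects(stmt, modifiedTree);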
void Compiler::gtUpdateSideEffects(Statement* stmt, GenTree* tree)
{
if (fgStmtListThreaded)
{
gtUpdateTreeAncestorsSideEffects(tree);
}
else
{
gtUpdateStmtSideEffects(stmt);
}
}
//------------------------------------------------------------------------
// gtUpdateTreeAncestorsSideEffects: Update the side effects of a tree and its ancestors
// when statement order has been established.
//
// Arguments:
// tree - Tree to update the side effects for
//
void Compiler::gtUpdateTreeAncestorsSideEffects(GenTree* tree)
{
assert(fgStmtListThreaded);
while (tree != nullptr)
{
gtUpdateNodeSideEffects(tree);
tree = tree->gtGetParent(nullptr);
}
}
//------------------------------------------------------------------------
// gtUpdateStmtSideEffects: Update the side effects for statement tree nodes.
//
// Arguments:
// stmt - The statement to update side effects on
//
void Compiler::gtUpdateStmtSideEffects(Statement* stmt)
{
fgWalkTree(stmt->GetRootNodePointer(), fgUpdateSideEffectsPre, fgUpdateSideEffectsPost);
}
//------------------------------------------------------------------------
// gtUpdateNodeOperSideEffects: Update the side effects based on the node operation.
//
// Arguments:
// tree - Tree to update the side effects on
//
// Notes:
// This method currently only updates GTF_EXCEPT, GTF_ASG, and GTF_CALL flags.
// The other side effect flags may remain unnecessarily (conservatively) set.
// The caller of this method is expected to update the flags based on the children's flags.
//
void Compiler::gtUpdateNodeOperSideEffects(GenTree* tree)
{
if (tree->OperMayThrow(this))
{
tree->gtFlags |= GTF_EXCEPT;
}
else
{
tree->gtFlags &= ~GTF_EXCEPT;
if (tree->OperIsIndirOrArrLength())
{
tree->SetIndirExceptionFlags(this);
}
}
if (tree->OperRequiresAsgFlag())
{
tree->gtFlags |= GTF_ASG;
}
else
{
tree->gtFlags &= ~GTF_ASG;
}
if (tree->OperRequiresCallFlag(this))
{
tree->gtFlags |= GTF_CALL;
}
else
{
tree->gtFlags &= ~GTF_CALL;
}
}
//------------------------------------------------------------------------
// gtUpdateNodeOperSideEffectsPost: Update the side effects based on the node operation,
// in the post-order visit of a tree walk. It is expected that the pre-order visit cleared
// the bits, so the post-order visit only sets them. This is important for binary nodes
// where one child already may have set the GTF_EXCEPT bit. Note that `SetIndirExceptionFlags`
// looks at its child, which is why we need to do this in a bottom-up walk.
//
// Arguments:
// tree - Tree to update the side effects on
//
// Notes:
// This method currently only updates GTF_ASG, GTF_CALL, and GTF_EXCEPT flags.
// The other side effect flags may remain unnecessarily (conservatively) set.
//
void Compiler::gtUpdateNodeOperSideEffectsPost(GenTree* tree)
{
if (tree->OperMayThrow(this))
{
tree->gtFlags |= GTF_EXCEPT;
}
if (tree->OperRequiresAsgFlag())
{
tree->gtFlags |= GTF_ASG;
}
if (tree->OperRequiresCallFlag(this))
{
tree->gtFlags |= GTF_CALL;
}
}
//------------------------------------------------------------------------
// gtUpdateNodeSideEffects: Update the side effects based on the node operation and
// children's side effects.
//
// Arguments:
// tree - Tree to update the side effects on
//
// Notes:
// This method currently only updates GTF_EXCEPT, GTF_ASG, and GTF_CALL flags.
// The other side effect flags may remain unnecessarily (conservatively) set.
//
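// Example (sketch): after an operand of 'tree' has been replaced with a simpler
// node (say a constant), recompute the flags so stale GTF_EXCEPT/GTF_ASG/GTF_CALL
// bits do not linger:
//    tree->AsOp()->gtOp2 = gtNewIconNode(0);
//    gtUpdateNodeSideEffects(tree);
//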
void Compiler::gtUpdateNodeSideEffects(GenTree* tree)
{
gtUpdateNodeOperSideEffects(tree);
tree->VisitOperands([tree](GenTree* operand) -> GenTree::VisitResult {
tree->gtFlags |= (operand->gtFlags & GTF_ALL_EFFECT);
return GenTree::VisitResult::Continue;
});
}
//------------------------------------------------------------------------
// fgUpdateSideEffectsPre: Update the side effects based on the tree operation.
// The pre-visit walk clears GTF_ASG, GTF_CALL, and GTF_EXCEPT; the post-visit walk sets
// the bits as necessary.
//
// Arguments:
// pTree - Pointer to the tree to update the side effects
// fgWalkPre - Walk data
//
Compiler::fgWalkResult Compiler::fgUpdateSideEffectsPre(GenTree** pTree, fgWalkData* fgWalkPre)
{
GenTree* tree = *pTree;
tree->gtFlags &= ~(GTF_ASG | GTF_CALL | GTF_EXCEPT);
return WALK_CONTINUE;
}
//------------------------------------------------------------------------
// fgUpdateSideEffectsPost: Update the side effects of the node and parent based on the tree's flags.
//
// Arguments:
// pTree - Pointer to the tree
// fgWalkPost - Walk data
//
// Notes:
// The routine is used for updating the stale side effect flags for ancestor
// nodes starting from treeParent up to the top-level stmt expr.
//
Compiler::fgWalkResult Compiler::fgUpdateSideEffectsPost(GenTree** pTree, fgWalkData* fgWalkPost)
{
GenTree* tree = *pTree;
// Update the node's side effects first.
fgWalkPost->compiler->gtUpdateNodeOperSideEffectsPost(tree);
// If this node is an indir or array length, and it doesn't have the GTF_EXCEPT bit set, we
// set the GTF_IND_NONFAULTING bit. This needs to be done after all children, and this node, have
// been processed.
if (tree->OperIsIndirOrArrLength() && ((tree->gtFlags & GTF_EXCEPT) == 0))
{
tree->gtFlags |= GTF_IND_NONFAULTING;
}
// Then update the parent's side effects based on this node.
GenTree* parent = fgWalkPost->parent;
if (parent != nullptr)
{
parent->gtFlags |= (tree->gtFlags & GTF_ALL_EFFECT);
}
return WALK_CONTINUE;
}
//------------------------------------------------------------------------
// gtGetThisArg: Return this pointer node for the call.
//
// Arguments:
// call - the call node with a this argument.
//
// Return value:
// the this pointer node.
//
GenTree* Compiler::gtGetThisArg(GenTreeCall* call)
{
assert(call->gtCallThisArg != nullptr);
GenTree* thisArg = call->gtCallThisArg->GetNode();
if (!thisArg->OperIs(GT_ASG))
{
if ((thisArg->gtFlags & GTF_LATE_ARG) == 0)
{
return thisArg;
}
}
assert(call->gtCallLateArgs != nullptr);
unsigned argNum = 0;
fgArgTabEntry* thisArgTabEntry = gtArgEntryByArgNum(call, argNum);
GenTree* result = thisArgTabEntry->GetNode();
// Assert if we used DEBUG_DESTROY_NODE.
assert(result->gtOper != GT_COUNT);
return result;
}
bool GenTree::gtSetFlags() const
{
//
// When FEATURE_SET_FLAGS (TARGET_ARM) is active the method returns true
// when the gtFlags has the flag GTF_SET_FLAGS set
// otherwise the architecture will have instructions that typically set
// the flags and this method will return true.
//
// Exceptions: GT_IND (load/store) is not allowed to set the flags
// and on XARCH the GT_MUL/GT_DIV and all overflow instructions
// do not set the condition flags
//
// Precondition: we have a GTK_SMPOP
//
if (!varTypeIsIntegralOrI(TypeGet()) && (TypeGet() != TYP_VOID))
{
return false;
}
if (((gtFlags & GTF_SET_FLAGS) != 0) && (gtOper != GT_IND))
{
// GTF_SET_FLAGS is not valid on GT_IND and is overlaid with GTF_IND_NONFAULTING
return true;
}
else
{
return false;
}
}
bool GenTree::gtRequestSetFlags()
{
bool result = false;
#if FEATURE_SET_FLAGS
// This method is a Nop unless FEATURE_SET_FLAGS is defined
// In order to set GTF_SET_FLAGS
// we must have a GTK_SMPOP
// and we have an integer or machine-size type (not floating point or TYP_LONG on 32-bit)
//
if (!OperIsSimple())
return false;
if (!varTypeIsIntegralOrI(TypeGet()))
return false;
switch (gtOper)
{
case GT_IND:
case GT_ARR_LENGTH:
// These will turn into simple load from memory instructions
// and we can't force the setting of the flags on load from memory
break;
case GT_MUL:
case GT_DIV:
// These instructions don't set the flags (on x86/x64)
//
break;
default:
// Otherwise we can set the flags for this gtOper
// and codegen must set the condition flags.
//
gtFlags |= GTF_SET_FLAGS;
result = true;
break;
}
#endif // FEATURE_SET_FLAGS
// Codegen for this tree must set the condition flags if
// this method returns true.
//
return result;
}
GenTreeUseEdgeIterator::GenTreeUseEdgeIterator()
: m_advance(nullptr), m_node(nullptr), m_edge(nullptr), m_statePtr(nullptr), m_state(-1)
{
}
GenTreeUseEdgeIterator::GenTreeUseEdgeIterator(GenTree* node)
: m_advance(nullptr), m_node(node), m_edge(nullptr), m_statePtr(nullptr), m_state(0)
{
assert(m_node != nullptr);
// NOTE: the switch statement below must be updated when introducing new nodes.
switch (m_node->OperGet())
{
// Leaf nodes
case GT_LCL_VAR:
case GT_LCL_FLD:
case GT_LCL_VAR_ADDR:
case GT_LCL_FLD_ADDR:
case GT_CATCH_ARG:
case GT_LABEL:
case GT_FTN_ADDR:
case GT_RET_EXPR:
case GT_CNS_INT:
case GT_CNS_LNG:
case GT_CNS_DBL:
case GT_CNS_STR:
case GT_MEMORYBARRIER:
case GT_JMP:
case GT_JCC:
case GT_SETCC:
case GT_NO_OP:
case GT_START_NONGC:
case GT_START_PREEMPTGC:
case GT_PROF_HOOK:
#if !defined(FEATURE_EH_FUNCLETS)
case GT_END_LFIN:
#endif // !FEATURE_EH_FUNCLETS
case GT_PHI_ARG:
case GT_JMPTABLE:
case GT_CLS_VAR:
case GT_CLS_VAR_ADDR:
case GT_ARGPLACE:
case GT_PHYSREG:
case GT_EMITNOP:
case GT_PINVOKE_PROLOG:
case GT_PINVOKE_EPILOG:
case GT_IL_OFFSET:
m_state = -1;
return;
// Standard unary operators
case GT_STORE_LCL_VAR:
case GT_STORE_LCL_FLD:
case GT_NOT:
case GT_NEG:
case GT_COPY:
case GT_RELOAD:
case GT_ARR_LENGTH:
case GT_CAST:
case GT_BITCAST:
case GT_CKFINITE:
case GT_LCLHEAP:
case GT_ADDR:
case GT_IND:
case GT_OBJ:
case GT_BLK:
case GT_BOX:
case GT_ALLOCOBJ:
case GT_RUNTIMELOOKUP:
case GT_INIT_VAL:
case GT_JTRUE:
case GT_SWITCH:
case GT_NULLCHECK:
case GT_PUTARG_REG:
case GT_PUTARG_STK:
case GT_PUTARG_TYPE:
case GT_BSWAP:
case GT_BSWAP16:
case GT_KEEPALIVE:
case GT_INC_SATURATE:
#if FEATURE_ARG_SPLIT
case GT_PUTARG_SPLIT:
#endif // FEATURE_ARG_SPLIT
case GT_RETURNTRAP:
m_edge = &m_node->AsUnOp()->gtOp1;
assert(*m_edge != nullptr);
m_advance = &GenTreeUseEdgeIterator::Terminate;
return;
// Unary operators with an optional operand
case GT_NOP:
case GT_FIELD:
case GT_RETURN:
case GT_RETFILT:
if (m_node->AsUnOp()->gtOp1 == nullptr)
{
assert(m_node->NullOp1Legal());
m_state = -1;
}
else
{
m_edge = &m_node->AsUnOp()->gtOp1;
m_advance = &GenTreeUseEdgeIterator::Terminate;
}
return;
// Variadic nodes
#ifdef FEATURE_SIMD
case GT_SIMD:
#endif
#ifdef FEATURE_HW_INTRINSICS
case GT_HWINTRINSIC:
#endif
#if defined(FEATURE_SIMD) || defined(FEATURE_HW_INTRINSICS)
SetEntryStateForMultiOp();
return;
#endif // defined(FEATURE_SIMD) || defined(FEATURE_HW_INTRINSICS)
// LEA, which may have no first operand
case GT_LEA:
if (m_node->AsAddrMode()->gtOp1 == nullptr)
{
m_edge = &m_node->AsAddrMode()->gtOp2;
m_advance = &GenTreeUseEdgeIterator::Terminate;
}
else
{
SetEntryStateForBinOp();
}
return;
// Special nodes
case GT_FIELD_LIST:
m_statePtr = m_node->AsFieldList()->Uses().GetHead();
m_advance = &GenTreeUseEdgeIterator::AdvanceFieldList;
AdvanceFieldList();
return;
case GT_PHI:
m_statePtr = m_node->AsPhi()->gtUses;
m_advance = &GenTreeUseEdgeIterator::AdvancePhi;
AdvancePhi();
return;
case GT_CMPXCHG:
m_edge = &m_node->AsCmpXchg()->gtOpLocation;
assert(*m_edge != nullptr);
m_advance = &GenTreeUseEdgeIterator::AdvanceCmpXchg;
return;
case GT_ARR_ELEM:
m_edge = &m_node->AsArrElem()->gtArrObj;
assert(*m_edge != nullptr);
m_advance = &GenTreeUseEdgeIterator::AdvanceArrElem;
return;
case GT_ARR_OFFSET:
m_edge = &m_node->AsArrOffs()->gtOffset;
assert(*m_edge != nullptr);
m_advance = &GenTreeUseEdgeIterator::AdvanceArrOffset;
return;
case GT_STORE_DYN_BLK:
m_edge = &m_node->AsStoreDynBlk()->Addr();
assert(*m_edge != nullptr);
m_advance = &GenTreeUseEdgeIterator::AdvanceStoreDynBlk;
return;
case GT_CALL:
AdvanceCall<CALL_INSTANCE>();
return;
// Binary nodes
default:
assert(m_node->OperIsBinary());
SetEntryStateForBinOp();
return;
}
}
//------------------------------------------------------------------------
// GenTreeUseEdgeIterator::AdvanceCmpXchg: produces the next operand of a CmpXchg node and advances the state.
//
void GenTreeUseEdgeIterator::AdvanceCmpXchg()
{
switch (m_state)
{
case 0:
m_edge = &m_node->AsCmpXchg()->gtOpValue;
m_state = 1;
break;
case 1:
m_edge = &m_node->AsCmpXchg()->gtOpComparand;
m_advance = &GenTreeUseEdgeIterator::Terminate;
break;
default:
unreached();
}
assert(*m_edge != nullptr);
}
//------------------------------------------------------------------------
// GenTreeUseEdgeIterator::AdvanceArrElem: produces the next operand of an ArrElem node and advances the state.
//
// Because these nodes are variadic, this function uses `m_state` to index into the list of array indices.
//
void GenTreeUseEdgeIterator::AdvanceArrElem()
{
if (m_state < m_node->AsArrElem()->gtArrRank)
{
m_edge = &m_node->AsArrElem()->gtArrInds[m_state];
assert(*m_edge != nullptr);
m_state++;
}
else
{
m_state = -1;
}
}
//------------------------------------------------------------------------
// GenTreeUseEdgeIterator::AdvanceArrOffset: produces the next operand of an ArrOffset node and advances the state.
//
void GenTreeUseEdgeIterator::AdvanceArrOffset()
{
switch (m_state)
{
case 0:
m_edge = &m_node->AsArrOffs()->gtIndex;
m_state = 1;
break;
case 1:
m_edge = &m_node->AsArrOffs()->gtArrObj;
m_advance = &GenTreeUseEdgeIterator::Terminate;
break;
default:
unreached();
}
assert(*m_edge != nullptr);
}
//------------------------------------------------------------------------
// GenTreeUseEdgeIterator::AdvanceStoreDynBlk: produces the next operand of a StoreDynBlk node and advances the state.
//
void GenTreeUseEdgeIterator::AdvanceStoreDynBlk()
{
GenTreeStoreDynBlk* const dynBlock = m_node->AsStoreDynBlk();
switch (m_state)
{
case 0:
m_edge = &dynBlock->Data();
m_state = 1;
break;
case 1:
m_edge = &dynBlock->gtDynamicSize;
m_advance = &GenTreeUseEdgeIterator::Terminate;
break;
default:
unreached();
}
assert(*m_edge != nullptr);
}
//------------------------------------------------------------------------
// GenTreeUseEdgeIterator::AdvanceFieldList: produces the next operand of a FieldList node and advances the state.
//
void GenTreeUseEdgeIterator::AdvanceFieldList()
{
assert(m_state == 0);
if (m_statePtr == nullptr)
{
m_state = -1;
}
else
{
GenTreeFieldList::Use* currentUse = static_cast<GenTreeFieldList::Use*>(m_statePtr);
m_edge = ¤tUse->NodeRef();
m_statePtr = currentUse->GetNext();
}
}
//------------------------------------------------------------------------
// GenTreeUseEdgeIterator::AdvancePhi: produces the next operand of a Phi node and advances the state.
//
void GenTreeUseEdgeIterator::AdvancePhi()
{
assert(m_state == 0);
if (m_statePtr == nullptr)
{
m_state = -1;
}
else
{
GenTreePhi::Use* currentUse = static_cast<GenTreePhi::Use*>(m_statePtr);
m_edge = ¤tUse->NodeRef();
m_statePtr = currentUse->GetNext();
}
}
//------------------------------------------------------------------------
// GenTreeUseEdgeIterator::AdvanceBinOp: produces the next operand of a binary node and advances the state.
//
// This function must be instantiated s.t. `ReverseOperands` is `true` iff the node is marked with the
// `GTF_REVERSE_OPS` flag.
//
template <bool ReverseOperands>
void GenTreeUseEdgeIterator::AdvanceBinOp()
{
assert(ReverseOperands == ((m_node->gtFlags & GTF_REVERSE_OPS) != 0));
m_edge = !ReverseOperands ? &m_node->AsOp()->gtOp2 : &m_node->AsOp()->gtOp1;
assert(*m_edge != nullptr);
m_advance = &GenTreeUseEdgeIterator::Terminate;
}
//------------------------------------------------------------------------
// GenTreeUseEdgeIterator::SetEntryStateForBinOp: produces the first operand of a binary node and chooses
// the appropriate advance function.
//
void GenTreeUseEdgeIterator::SetEntryStateForBinOp()
{
assert(m_node != nullptr);
assert(m_node->OperIsBinary());
GenTreeOp* const node = m_node->AsOp();
if (node->gtOp2 == nullptr)
{
assert(node->gtOp1 != nullptr);
assert(node->NullOp2Legal());
m_edge = &node->gtOp1;
m_advance = &GenTreeUseEdgeIterator::Terminate;
}
else if ((node->gtFlags & GTF_REVERSE_OPS) != 0)
{
m_edge = &m_node->AsOp()->gtOp2;
m_advance = &GenTreeUseEdgeIterator::AdvanceBinOp<true>;
}
else
{
m_edge = &m_node->AsOp()->gtOp1;
m_advance = &GenTreeUseEdgeIterator::AdvanceBinOp<false>;
}
}
#if defined(FEATURE_SIMD) || defined(FEATURE_HW_INTRINSICS)
//------------------------------------------------------------------------
// GenTreeUseEdgeIterator::AdvanceMultiOp: produces the next operand of a multi-op node and advances the state.
//
// Takes advantage of the fact that GenTreeMultiOp stores the operands in a contiguous array, simply
// incrementing the "m_edge" pointer, unless the end, stored in "m_statePtr", has been reached.
//
void GenTreeUseEdgeIterator::AdvanceMultiOp()
{
assert(m_node != nullptr);
assert(m_node->OperIs(GT_SIMD, GT_HWINTRINSIC));
m_edge++;
if (m_edge == m_statePtr)
{
Terminate();
}
}
//------------------------------------------------------------------------
// GenTreeUseEdgeIterator::AdvanceReversedMultiOp: produces the next operand of a multi-op node
// marked with GTF_REVERSE_OPS and advances the state.
//
// Takes advantage of the fact that GenTreeMultiOp stores the operands in a contiguous array, simply
// decrementing the "m_edge" pointer, unless the beginning, stored in "m_statePtr", has been reached.
//
void GenTreeUseEdgeIterator::AdvanceReversedMultiOp()
{
assert(m_node != nullptr);
assert(m_node->OperIs(GT_SIMD, GT_HWINTRINSIC));
assert((m_node->AsMultiOp()->GetOperandCount() == 2) && m_node->IsReverseOp());
m_edge--;
if (m_edge == m_statePtr)
{
Terminate();
}
}
//------------------------------------------------------------------------
// GenTreeUseEdgeIterator::SetEntryStateForMultiOp: produces the first operand of a multi-op node and sets the
// required advance function.
//
void GenTreeUseEdgeIterator::SetEntryStateForMultiOp()
{
size_t operandCount = m_node->AsMultiOp()->GetOperandCount();
if (operandCount == 0)
{
Terminate();
}
else
{
if (m_node->IsReverseOp())
{
assert(operandCount == 2);
m_edge = m_node->AsMultiOp()->GetOperandArray() + 1;
m_statePtr = m_node->AsMultiOp()->GetOperandArray() - 1;
m_advance = &GenTreeUseEdgeIterator::AdvanceReversedMultiOp;
}
else
{
m_edge = m_node->AsMultiOp()->GetOperandArray();
m_statePtr = m_node->AsMultiOp()->GetOperandArray(operandCount);
m_advance = &GenTreeUseEdgeIterator::AdvanceMultiOp;
}
}
}
#endif
//------------------------------------------------------------------------
// GenTreeUseEdgeIterator::AdvanceCall: produces the next operand of a call node and advances the state.
//
// This function is a bit tricky: in order to avoid doing unnecessary work, it is instantiated with the
// state number the iterator will be in when it is called. For example, `AdvanceCall<CALL_INSTANCE>`
// is the instantiation used when the iterator is at the `CALL_INSTANCE` state (i.e. the entry state).
// This sort of templating allows each state to avoid processing earlier states without unnecessary
// duplication of code.
//
// Note that this method expands the argument lists (`gtCallArgs` and `gtCallLateArgs`) into their
// component operands.
//
template <int state>
void GenTreeUseEdgeIterator::AdvanceCall()
{
GenTreeCall* const call = m_node->AsCall();
switch (state)
{
case CALL_INSTANCE:
m_statePtr = call->gtCallArgs;
m_advance = &GenTreeUseEdgeIterator::AdvanceCall<CALL_ARGS>;
if (call->gtCallThisArg != nullptr)
{
m_edge = &call->gtCallThisArg->NodeRef();
return;
}
FALLTHROUGH;
case CALL_ARGS:
if (m_statePtr != nullptr)
{
GenTreeCall::Use* use = static_cast<GenTreeCall::Use*>(m_statePtr);
m_edge = &use->NodeRef();
m_statePtr = use->GetNext();
return;
}
m_statePtr = call->gtCallLateArgs;
m_advance = &GenTreeUseEdgeIterator::AdvanceCall<CALL_LATE_ARGS>;
FALLTHROUGH;
case CALL_LATE_ARGS:
if (m_statePtr != nullptr)
{
GenTreeCall::Use* use = static_cast<GenTreeCall::Use*>(m_statePtr);
m_edge = &use->NodeRef();
m_statePtr = use->GetNext();
return;
}
m_advance = &GenTreeUseEdgeIterator::AdvanceCall<CALL_CONTROL_EXPR>;
FALLTHROUGH;
case CALL_CONTROL_EXPR:
if (call->gtControlExpr != nullptr)
{
if (call->gtCallType == CT_INDIRECT)
{
m_advance = &GenTreeUseEdgeIterator::AdvanceCall<CALL_COOKIE>;
}
else
{
m_advance = &GenTreeUseEdgeIterator::Terminate;
}
m_edge = &call->gtControlExpr;
return;
}
else if (call->gtCallType != CT_INDIRECT)
{
m_state = -1;
return;
}
FALLTHROUGH;
case CALL_COOKIE:
assert(call->gtCallType == CT_INDIRECT);
m_advance = &GenTreeUseEdgeIterator::AdvanceCall<CALL_ADDRESS>;
if (call->gtCallCookie != nullptr)
{
m_edge = &call->gtCallCookie;
return;
}
FALLTHROUGH;
case CALL_ADDRESS:
assert(call->gtCallType == CT_INDIRECT);
m_advance = &GenTreeUseEdgeIterator::Terminate;
if (call->gtCallAddr != nullptr)
{
m_edge = &call->gtCallAddr;
}
return;
default:
unreached();
}
}
//------------------------------------------------------------------------
// GenTreeUseEdgeIterator::Terminate: advances the iterator to the terminal state.
//
void GenTreeUseEdgeIterator::Terminate()
{
m_state = -1;
}
//------------------------------------------------------------------------
// GenTreeUseEdgeIterator::operator++: advances the iterator to the next operand.
//
GenTreeUseEdgeIterator& GenTreeUseEdgeIterator::operator++()
{
// If we've reached the terminal state, do nothing.
if (m_state != -1)
{
(this->*m_advance)();
}
return *this;
}
GenTreeUseEdgeIterator GenTree::UseEdgesBegin()
{
return GenTreeUseEdgeIterator(this);
}
GenTreeUseEdgeIterator GenTree::UseEdgesEnd()
{
return GenTreeUseEdgeIterator();
}
IteratorPair<GenTreeUseEdgeIterator> GenTree::UseEdges()
{
return MakeIteratorPair(UseEdgesBegin(), UseEdgesEnd());
}
GenTreeOperandIterator GenTree::OperandsBegin()
{
return GenTreeOperandIterator(this);
}
GenTreeOperandIterator GenTree::OperandsEnd()
{
return GenTreeOperandIterator();
}
IteratorPair<GenTreeOperandIterator> GenTree::Operands()
{
return MakeIteratorPair(OperandsBegin(), OperandsEnd());
}
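// Example (sketch) for the iterator pairs above: operands can be traversed
// without knowing a node's arity, and use edges can be rewritten in place:
//    for (GenTree* operand : tree->Operands())
//    {
//        // inspect 'operand'
//    }
//    for (GenTree** use : tree->UseEdges())
//    {
//        // '*use' may be reassigned to replace the operand
//    }
//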
bool GenTree::Precedes(GenTree* other)
{
assert(other != nullptr);
for (GenTree* node = gtNext; node != nullptr; node = node->gtNext)
{
if (node == other)
{
return true;
}
}
return false;
}
//------------------------------------------------------------------------------
// SetIndirExceptionFlags : Set GTF_EXCEPT and GTF_IND_NONFAULTING flags as appropriate
// on an indirection or an array length node.
//
// Arguments:
// comp - compiler instance
//
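// Example (sketch): a caller that has just changed the address subtree of an
// indirection can re-derive the node's exception flags from it:
//    indir->SetIndirExceptionFlags(comp);
//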
void GenTree::SetIndirExceptionFlags(Compiler* comp)
{
assert(OperIsIndirOrArrLength());
if (OperMayThrow(comp))
{
gtFlags |= GTF_EXCEPT;
return;
}
GenTree* addr = nullptr;
if (OperIsIndir())
{
addr = AsIndir()->Addr();
}
else
{
assert(gtOper == GT_ARR_LENGTH);
addr = AsArrLen()->ArrRef();
}
if ((addr->gtFlags & GTF_EXCEPT) != 0)
{
gtFlags |= GTF_EXCEPT;
}
else
{
gtFlags &= ~GTF_EXCEPT;
gtFlags |= GTF_IND_NONFAULTING;
}
}
#ifdef DEBUG
/* static */ int GenTree::gtDispFlags(GenTreeFlags flags, GenTreeDebugFlags debugFlags)
{
int charsDisplayed = 11; // 11 is the "baseline" number of flag characters displayed
printf("%c", (flags & GTF_ASG) ? 'A' : (IsContained(flags) ? 'c' : '-'));
printf("%c", (flags & GTF_CALL) ? 'C' : '-');
printf("%c", (flags & GTF_EXCEPT) ? 'X' : '-');
printf("%c", (flags & GTF_GLOB_REF) ? 'G' : '-');
printf("%c", (debugFlags & GTF_DEBUG_NODE_MORPHED) ? '+' : // First print '+' if GTF_DEBUG_NODE_MORPHED is set
(flags & GTF_ORDER_SIDEEFF) ? 'O' : '-'); // otherwise print 'O' or '-'
printf("%c", (flags & GTF_COLON_COND) ? '?' : '-');
printf("%c", (flags & GTF_DONT_CSE) ? 'N' : // N is for No cse
(flags & GTF_MAKE_CSE) ? 'H' : '-'); // H is for Hoist this expr
printf("%c", (flags & GTF_REVERSE_OPS) ? 'R' : '-');
printf("%c", (flags & GTF_UNSIGNED) ? 'U' : (flags & GTF_BOOLEAN) ? 'B' : '-');
#if FEATURE_SET_FLAGS
printf("%c", (flags & GTF_SET_FLAGS) ? 'S' : '-');
++charsDisplayed;
#endif
printf("%c", (flags & GTF_LATE_ARG) ? 'L' : '-');
printf("%c", (flags & GTF_SPILLED) ? 'z' : (flags & GTF_SPILL) ? 'Z' : '-');
return charsDisplayed;
}
#ifdef TARGET_X86
inline const char* GetCallConvName(CorInfoCallConvExtension callConv)
{
switch (callConv)
{
case CorInfoCallConvExtension::Managed:
return "Managed";
case CorInfoCallConvExtension::C:
return "C";
case CorInfoCallConvExtension::Stdcall:
return "Stdcall";
case CorInfoCallConvExtension::Thiscall:
return "Thiscall";
case CorInfoCallConvExtension::Fastcall:
return "Fastcall";
case CorInfoCallConvExtension::CMemberFunction:
return "CMemberFunction";
case CorInfoCallConvExtension::StdcallMemberFunction:
return "StdcallMemberFunction";
case CorInfoCallConvExtension::FastcallMemberFunction:
return "FastcallMemberFunction";
default:
return "UnknownCallConv";
}
}
#endif // TARGET_X86
/*****************************************************************************/
void Compiler::gtDispNodeName(GenTree* tree)
{
/* print the node name */
const char* name;
assert(tree);
if (tree->gtOper < GT_COUNT)
{
name = GenTree::OpName(tree->OperGet());
}
else
{
name = "<ERROR>";
}
char buf[32];
char* bufp = &buf[0];
if ((tree->gtOper == GT_CNS_INT) && tree->IsIconHandle())
{
sprintf_s(bufp, sizeof(buf), " %s(h)%c", name, 0);
}
else if (tree->gtOper == GT_PUTARG_STK)
{
sprintf_s(bufp, sizeof(buf), " %s [+0x%02x]%c", name, tree->AsPutArgStk()->getArgOffset(), 0);
}
else if (tree->gtOper == GT_CALL)
{
const char* callType = "CALL";
const char* gtfType = "";
const char* ctType = "";
char gtfTypeBuf[100];
if (tree->AsCall()->gtCallType == CT_USER_FUNC)
{
if (tree->AsCall()->IsVirtual())
{
callType = "CALLV";
}
}
else if (tree->AsCall()->gtCallType == CT_HELPER)
{
ctType = " help";
}
else if (tree->AsCall()->gtCallType == CT_INDIRECT)
{
ctType = " ind";
}
else
{
assert(!"Unknown gtCallType");
}
if (tree->gtFlags & GTF_CALL_NULLCHECK)
{
gtfType = " nullcheck";
}
if (tree->AsCall()->IsVirtualVtable())
{
gtfType = " vt-ind";
}
else if (tree->AsCall()->IsVirtualStub())
{
gtfType = " stub";
}
#ifdef FEATURE_READYTORUN
else if (tree->AsCall()->IsR2RRelativeIndir())
{
gtfType = " r2r_ind";
}
#endif // FEATURE_READYTORUN
else if (tree->gtFlags & GTF_CALL_UNMANAGED)
{
char* gtfTypeBufWalk = gtfTypeBuf;
gtfTypeBufWalk += SimpleSprintf_s(gtfTypeBufWalk, gtfTypeBuf, sizeof(gtfTypeBuf), " unman");
if (tree->gtFlags & GTF_CALL_POP_ARGS)
{
gtfTypeBufWalk += SimpleSprintf_s(gtfTypeBufWalk, gtfTypeBuf, sizeof(gtfTypeBuf), " popargs");
}
if (tree->AsCall()->gtCallMoreFlags & GTF_CALL_M_UNMGD_THISCALL)
{
gtfTypeBufWalk += SimpleSprintf_s(gtfTypeBufWalk, gtfTypeBuf, sizeof(gtfTypeBuf), " thiscall");
}
#ifdef TARGET_X86
gtfTypeBufWalk += SimpleSprintf_s(gtfTypeBufWalk, gtfTypeBuf, sizeof(gtfTypeBuf), " %s",
GetCallConvName(tree->AsCall()->GetUnmanagedCallConv()));
#endif // TARGET_X86
gtfType = gtfTypeBuf;
}
sprintf_s(bufp, sizeof(buf), " %s%s%s%c", callType, ctType, gtfType, 0);
}
else if (tree->gtOper == GT_ARR_ELEM)
{
bufp += SimpleSprintf_s(bufp, buf, sizeof(buf), " %s[", name);
for (unsigned rank = tree->AsArrElem()->gtArrRank - 1; rank; rank--)
{
bufp += SimpleSprintf_s(bufp, buf, sizeof(buf), ",");
}
SimpleSprintf_s(bufp, buf, sizeof(buf), "]");
}
else if (tree->gtOper == GT_ARR_OFFSET || tree->gtOper == GT_ARR_INDEX)
{
bufp += SimpleSprintf_s(bufp, buf, sizeof(buf), " %s[", name);
unsigned char currDim;
unsigned char rank;
if (tree->gtOper == GT_ARR_OFFSET)
{
currDim = tree->AsArrOffs()->gtCurrDim;
rank = tree->AsArrOffs()->gtArrRank;
}
else
{
currDim = tree->AsArrIndex()->gtCurrDim;
rank = tree->AsArrIndex()->gtArrRank;
}
for (unsigned char dim = 0; dim < rank; dim++)
{
// Use a de facto standard i,j,k for the dimensions.
// Note that we only support up to rank 3 arrays with these nodes, so we won't run out of characters.
char dimChar = '*';
if (dim == currDim)
{
dimChar = 'i' + dim;
}
else if (dim > currDim)
{
dimChar = ' ';
}
bufp += SimpleSprintf_s(bufp, buf, sizeof(buf), "%c", dimChar);
if (dim != rank - 1)
{
bufp += SimpleSprintf_s(bufp, buf, sizeof(buf), ",");
}
}
SimpleSprintf_s(bufp, buf, sizeof(buf), "]");
}
else if (tree->gtOper == GT_LEA)
{
GenTreeAddrMode* lea = tree->AsAddrMode();
bufp += SimpleSprintf_s(bufp, buf, sizeof(buf), " %s(", name);
if (lea->Base() != nullptr)
{
bufp += SimpleSprintf_s(bufp, buf, sizeof(buf), "b+");
}
if (lea->Index() != nullptr)
{
bufp += SimpleSprintf_s(bufp, buf, sizeof(buf), "(i*%d)+", lea->gtScale);
}
bufp += SimpleSprintf_s(bufp, buf, sizeof(buf), "%d)", lea->Offset());
}
else if (tree->gtOper == GT_BOUNDS_CHECK)
{
switch (tree->AsBoundsChk()->gtThrowKind)
{
case SCK_RNGCHK_FAIL:
{
bufp += SimpleSprintf_s(bufp, buf, sizeof(buf), " %s_Rng", name);
if (tree->AsBoundsChk()->gtIndRngFailBB != nullptr)
{
bufp += SimpleSprintf_s(bufp, buf, sizeof(buf), " -> " FMT_BB,
tree->AsBoundsChk()->gtIndRngFailBB->bbNum);
}
break;
}
case SCK_ARG_EXCPN:
sprintf_s(bufp, sizeof(buf), " %s_Arg", name);
break;
case SCK_ARG_RNG_EXCPN:
sprintf_s(bufp, sizeof(buf), " %s_ArgRng", name);
break;
default:
unreached();
}
}
else if (tree->gtOverflowEx())
{
sprintf_s(bufp, sizeof(buf), " %s_ovfl%c", name, 0);
}
else
{
sprintf_s(bufp, sizeof(buf), " %s%c", name, 0);
}
if (strlen(buf) < 10)
{
printf(" %-10s", buf);
}
else
{
printf(" %s", buf);
}
}
//------------------------------------------------------------------------
// gtDispZeroFieldSeq: If this node has a zero fieldSeq annotation
// then print this Field Sequence
//
void Compiler::gtDispZeroFieldSeq(GenTree* tree)
{
NodeToFieldSeqMap* map = GetZeroOffsetFieldMap();
// The most common case is having no entries in this map
if (map->GetCount() > 0)
{
FieldSeqNode* fldSeq = nullptr;
if (map->Lookup(tree, &fldSeq))
{
printf(" Zero");
gtDispAnyFieldSeq(fldSeq);
}
}
}
//------------------------------------------------------------------------
// gtDispVN: Utility function that prints a tree's ValueNumber: gtVNPair
//
void Compiler::gtDispVN(GenTree* tree)
{
if (tree->gtVNPair.GetLiberal() != ValueNumStore::NoVN)
{
assert(tree->gtVNPair.GetConservative() != ValueNumStore::NoVN);
printf(" ");
vnpPrint(tree->gtVNPair, 0);
}
}
//------------------------------------------------------------------------
// gtDispCommonEndLine
// Utility function that prints the following node information
// 1. The associated zero field sequence (if any)
// 2. The register assigned to this node (if any)
// 3. The value number assigned (if any)
// 4. A newline character
//
void Compiler::gtDispCommonEndLine(GenTree* tree)
{
gtDispZeroFieldSeq(tree);
gtDispRegVal(tree);
gtDispVN(tree);
printf("\n");
}
//------------------------------------------------------------------------
// gtDispNode: Print a tree to jitstdout.
//
// Arguments:
// tree - the tree to be printed
// indentStack - the specification for the current level of indentation & arcs
// msg - a contextual message (i.e. from the parent) to print
//
// Return Value:
// None.
//
// Notes:
// 'indentStack' may be null, in which case no indentation or arcs are printed
// 'msg' may be null
void Compiler::gtDispNode(GenTree* tree, IndentStack* indentStack, _In_ _In_opt_z_ const char* msg, bool isLIR)
{
bool printFlags = true; // always true..
int msgLength = 25;
GenTree* prev;
if (tree->gtSeqNum)
{
printf("N%03u ", tree->gtSeqNum);
if (tree->gtCostsInitialized)
{
printf("(%3u,%3u) ", tree->GetCostEx(), tree->GetCostSz());
}
else
{
printf("(???"
",???"
") "); // This probably indicates a bug: the node has a sequence number, but not costs.
}
}
else
{
prev = tree;
bool hasSeqNum = true;
unsigned dotNum = 0;
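// This node has no sequence number; walk backwards to the nearest node that does,
// counting how far we walked, so the node can be displayed as N<prevSeq>.<dotNum>.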
do
{
dotNum++;
prev = prev->gtPrev;
if ((prev == nullptr) || (prev == tree))
{
hasSeqNum = false;
break;
}
assert(prev);
} while (prev->gtSeqNum == 0);
// If we have an indent stack, don't add additional characters,
// as it will mess up the alignment.
bool displayDotNum = hasSeqNum && (indentStack == nullptr);
if (displayDotNum)
{
printf("N%03u.%02u ", prev->gtSeqNum, dotNum);
}
else
{
printf(" ");
}
if (tree->gtCostsInitialized)
{
printf("(%3u,%3u) ", tree->GetCostEx(), tree->GetCostSz());
}
else
{
if (displayDotNum)
{
// Do better alignment in this case
printf(" ");
}
else
{
printf(" ");
}
}
}
if (optValnumCSE_phase)
{
if (IS_CSE_INDEX(tree->gtCSEnum))
{
printf(FMT_CSE " (%s)", GET_CSE_INDEX(tree->gtCSEnum), (IS_CSE_USE(tree->gtCSEnum) ? "use" : "def"));
}
else
{
printf(" ");
}
}
/* Print the node ID */
printTreeID(tree);
printf(" ");
if (tree->gtOper >= GT_COUNT)
{
printf(" **** ILLEGAL NODE ****");
return;
}
if (printFlags)
{
/* First print the flags associated with the node */
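// Print an operator-specific flag character where one applies; cases with nothing
// special to show fall through to the DASH label below and print '-'.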
switch (tree->gtOper)
{
case GT_LEA:
case GT_BLK:
case GT_OBJ:
case GT_STORE_BLK:
case GT_STORE_OBJ:
case GT_STORE_DYN_BLK:
case GT_IND:
// We prefer printing V or U
if ((tree->gtFlags & (GTF_IND_VOLATILE | GTF_IND_UNALIGNED)) == 0)
{
if (tree->gtFlags & GTF_IND_TGTANYWHERE)
{
printf("*");
--msgLength;
break;
}
if (tree->gtFlags & GTF_IND_TGT_NOT_HEAP)
{
printf("s");
--msgLength;
break;
}
if (tree->gtFlags & GTF_IND_INVARIANT)
{
printf("#");
--msgLength;
break;
}
if (tree->gtFlags & GTF_IND_ARR_INDEX)
{
printf("a");
--msgLength;
break;
}
if (tree->gtFlags & GTF_IND_NONFAULTING)
{
printf("n"); // print a n for non-faulting
--msgLength;
break;
}
if (tree->gtFlags & GTF_IND_ASG_LHS)
{
printf("D"); // print a D for definition
--msgLength;
break;
}
if (tree->gtFlags & GTF_IND_NONNULL)
{
printf("@");
--msgLength;
break;
}
}
FALLTHROUGH;
case GT_INDEX:
case GT_INDEX_ADDR:
case GT_FIELD:
case GT_CLS_VAR:
if (tree->gtFlags & GTF_IND_VOLATILE)
{
printf("V");
--msgLength;
break;
}
if (tree->gtFlags & GTF_IND_UNALIGNED)
{
printf("U");
--msgLength;
break;
}
goto DASH;
case GT_ASG:
if (tree->OperIsInitBlkOp())
{
printf("I");
--msgLength;
break;
}
goto DASH;
case GT_CALL:
if (tree->AsCall()->IsInlineCandidate())
{
if (tree->AsCall()->IsGuardedDevirtualizationCandidate())
{
printf("&");
}
else
{
printf("I");
}
--msgLength;
break;
}
else if (tree->AsCall()->IsGuardedDevirtualizationCandidate())
{
printf("G");
--msgLength;
break;
}
if (tree->AsCall()->gtCallMoreFlags & GTF_CALL_M_RETBUFFARG)
{
printf("S");
--msgLength;
break;
}
if (tree->gtFlags & GTF_CALL_HOISTABLE)
{
printf("H");
--msgLength;
break;
}
goto DASH;
case GT_MUL:
#if !defined(TARGET_64BIT)
case GT_MUL_LONG:
#endif
if (tree->gtFlags & GTF_MUL_64RSLT)
{
printf("L");
--msgLength;
break;
}
goto DASH;
case GT_DIV:
case GT_MOD:
case GT_UDIV:
case GT_UMOD:
if (tree->gtFlags & GTF_DIV_BY_CNS_OPT)
{
printf("M"); // We will use a Multiply by reciprical
--msgLength;
break;
}
goto DASH;
case GT_LCL_FLD:
case GT_LCL_VAR:
case GT_LCL_VAR_ADDR:
case GT_LCL_FLD_ADDR:
case GT_STORE_LCL_FLD:
case GT_STORE_LCL_VAR:
if (tree->gtFlags & GTF_VAR_USEASG)
{
printf("U");
--msgLength;
break;
}
if (tree->gtFlags & GTF_VAR_MULTIREG)
{
printf((tree->gtFlags & GTF_VAR_DEF) ? "M" : "m");
--msgLength;
break;
}
if (tree->gtFlags & GTF_VAR_DEF)
{
printf("D");
--msgLength;
break;
}
if (tree->gtFlags & GTF_VAR_CAST)
{
printf("C");
--msgLength;
break;
}
if (tree->gtFlags & GTF_VAR_ARR_INDEX)
{
printf("i");
--msgLength;
break;
}
if (tree->gtFlags & GTF_VAR_CONTEXT)
{
printf("!");
--msgLength;
break;
}
goto DASH;
case GT_EQ:
case GT_NE:
case GT_LT:
case GT_LE:
case GT_GE:
case GT_GT:
case GT_TEST_EQ:
case GT_TEST_NE:
if (tree->gtFlags & GTF_RELOP_NAN_UN)
{
printf("N");
--msgLength;
break;
}
if (tree->gtFlags & GTF_RELOP_JMP_USED)
{
printf("J");
--msgLength;
break;
}
goto DASH;
case GT_JCMP:
printf((tree->gtFlags & GTF_JCMP_TST) ? "T" : "C");
printf((tree->gtFlags & GTF_JCMP_EQ) ? "EQ" : "NE");
goto DASH;
case GT_CNS_INT:
if (tree->IsIconHandle())
{
if ((tree->gtFlags & GTF_ICON_INITCLASS) != 0)
{
printf("I"); // Static Field handle with INITCLASS requirement
--msgLength;
break;
}
else if ((tree->gtFlags & GTF_ICON_FIELD_OFF) != 0)
{
printf("O");
--msgLength;
break;
}
else
{
// Some other handle
printf("H");
--msgLength;
break;
}
}
goto DASH;
default:
DASH:
printf("-");
--msgLength;
break;
}
/* Then print the general purpose flags */
GenTreeFlags flags = tree->gtFlags;
if (tree->OperIsBinary() || tree->OperIsMultiOp())
{
genTreeOps oper = tree->OperGet();
// Check for GTF_ADDRMODE_NO_CSE flag on add/mul/shl Binary Operators
if ((oper == GT_ADD) || (oper == GT_MUL) || (oper == GT_LSH))
{
if ((tree->gtFlags & GTF_ADDRMODE_NO_CSE) != 0)
{
flags |= GTF_DONT_CSE; // Force the GTF_ADDRMODE_NO_CSE flag to print out like GTF_DONT_CSE
}
}
}
else // !(tree->OperIsBinary() || tree->OperIsMultiOp())
{
// the GTF_REVERSE flag only applies to binary operations (which some MultiOp nodes are).
flags &= ~GTF_REVERSE_OPS; // we use this value for GTF_VAR_ARR_INDEX above
}
msgLength -= GenTree::gtDispFlags(flags, tree->gtDebugFlags);
/*
printf("%c", (flags & GTF_ASG ) ? 'A' : '-');
printf("%c", (flags & GTF_CALL ) ? 'C' : '-');
printf("%c", (flags & GTF_EXCEPT ) ? 'X' : '-');
printf("%c", (flags & GTF_GLOB_REF ) ? 'G' : '-');
printf("%c", (flags & GTF_ORDER_SIDEEFF ) ? 'O' : '-');
printf("%c", (flags & GTF_COLON_COND ) ? '?' : '-');
printf("%c", (flags & GTF_DONT_CSE ) ? 'N' : // N is for No cse
(flags & GTF_MAKE_CSE ) ? 'H' : '-'); // H is for Hoist this expr
printf("%c", (flags & GTF_REVERSE_OPS ) ? 'R' : '-');
printf("%c", (flags & GTF_UNSIGNED ) ? 'U' :
(flags & GTF_BOOLEAN ) ? 'B' : '-');
printf("%c", (flags & GTF_SET_FLAGS ) ? 'S' : '-');
printf("%c", (flags & GTF_SPILLED ) ? 'z' : '-');
printf("%c", (flags & GTF_SPILL ) ? 'Z' : '-');
*/
}
// If we're printing a node for LIR, we use the space normally associated with the message
// to display the node's temp name (if any)
const bool hasOperands = tree->OperandsBegin() != tree->OperandsEnd();
if (isLIR)
{
assert(msg == nullptr);
// If the tree does not have any operands, we do not display the indent stack. This gives us
// two additional characters for alignment.
if (!hasOperands)
{
msgLength += 1;
}
if (tree->IsValue())
{
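// Build the LIR temp name ("tNN = ") into a stack buffer; for value-producing nodes
// it takes the place of the usual message.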
const size_t bufLength = msgLength - 1;
msg = reinterpret_cast<char*>(_alloca(bufLength * sizeof(char)));
sprintf_s(const_cast<char*>(msg), bufLength, "t%d = %s", tree->gtTreeID, hasOperands ? "" : " ");
}
}
/* print the msg associated with the node */
if (msg == nullptr)
{
msg = "";
}
if (msgLength < 0)
{
msgLength = 0;
}
printf(isLIR ? " %+*s" : " %-*s", msgLength, msg);
/* Indent the node accordingly */
if (!isLIR || hasOperands)
{
printIndent(indentStack);
}
gtDispNodeName(tree);
assert(tree == nullptr || tree->gtOper < GT_COUNT);
if (tree)
{
/* print the type of the node */
if (tree->gtOper != GT_CAST)
{
printf(" %-6s", varTypeName(tree->TypeGet()));
if (varTypeIsStruct(tree->TypeGet()))
{
ClassLayout* layout = nullptr;
if (tree->OperIs(GT_BLK, GT_OBJ, GT_STORE_BLK, GT_STORE_OBJ))
{
layout = tree->AsBlk()->GetLayout();
}
else if (tree->OperIs(GT_LCL_VAR, GT_STORE_LCL_VAR))
{
LclVarDsc* varDsc = lvaGetDesc(tree->AsLclVar());
if (varTypeIsStruct(varDsc->TypeGet()))
{
layout = varDsc->GetLayout();
}
}
if (layout != nullptr)
{
gtDispClassLayout(layout, tree->TypeGet());
}
}
if (tree->gtOper == GT_LCL_VAR || tree->gtOper == GT_STORE_LCL_VAR)
{
LclVarDsc* varDsc = lvaGetDesc(tree->AsLclVarCommon());
if (varDsc->IsAddressExposed())
{
printf("(AX)"); // Variable has address exposed.
}
if (varDsc->lvUnusedStruct)
{
assert(varDsc->lvPromoted);
printf("(U)"); // Unused struct
}
else if (varDsc->lvPromoted)
{
if (varTypeIsPromotable(varDsc))
{
printf("(P)"); // Promoted struct
}
else
{
// Promoted implicit by-refs can have this state during
// global morph while they are being rewritten
printf("(P?!)"); // Promoted struct
}
}
}
if (tree->IsArgPlaceHolderNode() && (tree->AsArgPlace()->gtArgPlaceClsHnd != nullptr))
{
printf(" => [clsHnd=%08X]", dspPtr(tree->AsArgPlace()->gtArgPlaceClsHnd));
}
if (tree->gtOper == GT_RUNTIMELOOKUP)
{
#ifdef TARGET_64BIT
printf(" 0x%llx", dspPtr(tree->AsRuntimeLookup()->gtHnd));
#else
printf(" 0x%x", dspPtr(tree->AsRuntimeLookup()->gtHnd));
#endif
switch (tree->AsRuntimeLookup()->gtHndType)
{
case CORINFO_HANDLETYPE_CLASS:
printf(" class");
break;
case CORINFO_HANDLETYPE_METHOD:
printf(" method");
break;
case CORINFO_HANDLETYPE_FIELD:
printf(" field");
break;
default:
printf(" unknown");
break;
}
}
}
// for tracking down problems in reguse prediction or liveness tracking
if (verbose && 0)
{
printf(" RR=");
dspRegMask(tree->gtRsvdRegs);
printf("\n");
}
}
}
#if FEATURE_MULTIREG_RET
//----------------------------------------------------------------------------------
// gtDispMultiRegCount: determine how many registers to print for a multi-reg node
//
// Arguments:
// tree - GenTree node whose registers we want to print
//
// Return Value:
// The number of registers to print
//
// Notes:
// This is not the same in all cases as GenTree::GetMultiRegCount().
// In particular, for COPY or RELOAD it only returns the number of *valid* registers,
// and for CALL, it will return 0 if the ReturnTypeDesc hasn't yet been initialized.
// But we want to print all register positions.
//
unsigned Compiler::gtDispMultiRegCount(GenTree* tree)
{
if (tree->IsCopyOrReload())
{
// GetRegCount() will return only the number of valid regs for COPY or RELOAD,
// but we want to print all positions, so we get the reg count for op1.
return gtDispMultiRegCount(tree->gtGetOp1());
}
else if (!tree->IsMultiRegNode())
{
// We can wind up here because IsMultiRegNode() always returns true for COPY or RELOAD,
// even if its op1 is not multireg.
// Note that this method won't be called for non-register-producing nodes.
return 1;
}
else if (tree->OperIs(GT_CALL))
{
unsigned regCount = tree->AsCall()->GetReturnTypeDesc()->TryGetReturnRegCount();
// If it hasn't yet been initialized, we'd still like to see the registers printed.
if (regCount == 0)
{
regCount = MAX_RET_REG_COUNT;
}
return regCount;
}
else
{
return tree->GetMultiRegCount(this);
}
}
#endif // FEATURE_MULTIREG_RET
//----------------------------------------------------------------------------------
// gtDispRegVal: Print the register(s) defined by the given node
//
// Arguments:
// tree - Gentree node whose registers we want to print
//
void Compiler::gtDispRegVal(GenTree* tree)
{
switch (tree->GetRegTag())
{
// Don't display anything for the GT_REGTAG_NONE case;
// the absence of printed register values will imply this state.
case GenTree::GT_REGTAG_REG:
printf(" REG %s", compRegVarName(tree->GetRegNum()));
break;
default:
return;
}
#if FEATURE_MULTIREG_RET
if (tree->IsMultiRegNode())
{
// 0th reg is GetRegNum(), which is already printed above.
// Print the remaining regs of a multi-reg node.
unsigned regCount = gtDispMultiRegCount(tree);
// For some nodes, e.g. COPY, RELOAD or CALL, we may not have valid regs for all positions.
for (unsigned i = 1; i < regCount; ++i)
{
regNumber reg = tree->GetRegByIndex(i);
printf(",%s", genIsValidReg(reg) ? compRegVarName(reg) : "NA");
}
}
#endif
}
// We usually don't expect to print anything longer than this string.
#define LONGEST_COMMON_LCL_VAR_DISPLAY "V99 PInvokeFrame"
#define LONGEST_COMMON_LCL_VAR_DISPLAY_LENGTH (sizeof(LONGEST_COMMON_LCL_VAR_DISPLAY))
#define BUF_SIZE (LONGEST_COMMON_LCL_VAR_DISPLAY_LENGTH * 2)
void Compiler::gtGetLclVarNameInfo(unsigned lclNum, const char** ilKindOut, const char** ilNameOut, unsigned* ilNumOut)
{
const char* ilKind = nullptr;
const char* ilName = nullptr;
unsigned ilNum = compMap2ILvarNum(lclNum);
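// compMap2ILvarNum maps the JIT local back to its IL variable number, or to one of
// the special ICorDebugInfo sentinels (handled below) for compiler-introduced locals.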
if (ilNum == (unsigned)ICorDebugInfo::RETBUF_ILNUM)
{
ilName = "RetBuf";
}
else if (ilNum == (unsigned)ICorDebugInfo::VARARGS_HND_ILNUM)
{
ilName = "VarArgHandle";
}
else if (ilNum == (unsigned)ICorDebugInfo::TYPECTXT_ILNUM)
{
ilName = "TypeCtx";
}
else if (ilNum == (unsigned)ICorDebugInfo::UNKNOWN_ILNUM)
{
if (lclNumIsTrueCSE(lclNum))
{
ilKind = "cse";
ilNum = lclNum - optCSEstart;
}
else if (lclNum >= optCSEstart)
{
// Currently any new LclVars introduced after the CSE phase are assumed to be
// created by the "rationalizer", which is what the "rat" prefix denotes.
ilKind = "rat";
ilNum = lclNum - (optCSEstart + optCSEcount);
}
else
{
if (lclNum == info.compLvFrameListRoot)
{
ilName = "FramesRoot";
}
else if (lclNum == lvaInlinedPInvokeFrameVar)
{
ilName = "PInvokeFrame";
}
else if (lclNum == lvaGSSecurityCookie)
{
ilName = "GsCookie";
}
else if (lclNum == lvaRetAddrVar)
{
ilName = "ReturnAddress";
}
#if FEATURE_FIXED_OUT_ARGS
else if (lclNum == lvaPInvokeFrameRegSaveVar)
{
ilName = "PInvokeFrameRegSave";
}
else if (lclNum == lvaOutgoingArgSpaceVar)
{
ilName = "OutArgs";
}
#endif // FEATURE_FIXED_OUT_ARGS
#if !defined(FEATURE_EH_FUNCLETS)
else if (lclNum == lvaShadowSPslotsVar)
{
ilName = "EHSlots";
}
#endif // !FEATURE_EH_FUNCLETS
#ifdef JIT32_GCENCODER
else if (lclNum == lvaLocAllocSPvar)
{
ilName = "LocAllocSP";
}
#endif // JIT32_GCENCODER
#if defined(FEATURE_EH_FUNCLETS)
else if (lclNum == lvaPSPSym)
{
ilName = "PSPSym";
}
#endif // FEATURE_EH_FUNCLETS
else
{
ilKind = "tmp";
if (compIsForInlining())
{
ilNum = lclNum - impInlineInfo->InlinerCompiler->info.compLocalsCount;
}
else
{
ilNum = lclNum - info.compLocalsCount;
}
}
}
}
else if (lclNum < (compIsForInlining() ? impInlineInfo->InlinerCompiler->info.compArgsCount : info.compArgsCount))
{
if (ilNum == 0 && !info.compIsStatic)
{
ilName = "this";
}
else
{
ilKind = "arg";
}
}
else
{
if (!lvaTable[lclNum].lvIsStructField)
{
ilKind = "loc";
}
if (compIsForInlining())
{
ilNum -= impInlineInfo->InlinerCompiler->info.compILargsCount;
}
else
{
ilNum -= info.compILargsCount;
}
}
*ilKindOut = ilKind;
*ilNameOut = ilName;
*ilNumOut = ilNum;
}
/*****************************************************************************/
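//------------------------------------------------------------------------
// gtGetLclVarName: Format "V<lclNum>" followed by any IL name or kind/number
//    information into the given buffer.
//
// Return Value:
//    The number of characters printed, or a negative value if formatting failed.
//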
int Compiler::gtGetLclVarName(unsigned lclNum, char* buf, unsigned buf_remaining)
{
char* bufp_next = buf;
unsigned charsPrinted = 0;
int sprintf_result;
sprintf_result = sprintf_s(bufp_next, buf_remaining, "V%02u", lclNum);
if (sprintf_result < 0)
{
return sprintf_result;
}
charsPrinted += sprintf_result;
bufp_next += sprintf_result;
buf_remaining -= sprintf_result;
const char* ilKind = nullptr;
const char* ilName = nullptr;
unsigned ilNum = 0;
gtGetLclVarNameInfo(lclNum, &ilKind, &ilName, &ilNum);
if (ilName != nullptr)
{
sprintf_result = sprintf_s(bufp_next, buf_remaining, " %s", ilName);
if (sprintf_result < 0)
{
return sprintf_result;
}
charsPrinted += sprintf_result;
bufp_next += sprintf_result;
buf_remaining -= sprintf_result;
}
else if (ilKind != nullptr)
{
sprintf_result = sprintf_s(bufp_next, buf_remaining, " %s%d", ilKind, ilNum);
if (sprintf_result < 0)
{
return sprintf_result;
}
charsPrinted += sprintf_result;
bufp_next += sprintf_result;
buf_remaining -= sprintf_result;
}
assert(charsPrinted > 0);
assert(buf_remaining > 0);
return (int)charsPrinted;
}
/*****************************************************************************
* Get the local var name, and create a copy of the string that can be used in debug output.
*/
char* Compiler::gtGetLclVarName(unsigned lclNum)
{
char buf[BUF_SIZE];
int charsPrinted = gtGetLclVarName(lclNum, buf, ArrLen(buf));
if (charsPrinted < 0)
{
return nullptr;
}
char* retBuf = new (this, CMK_DebugOnly) char[charsPrinted + 1];
strcpy_s(retBuf, charsPrinted + 1, buf);
return retBuf;
}
/*****************************************************************************/
void Compiler::gtDispLclVar(unsigned lclNum, bool padForBiggestDisp)
{
char buf[BUF_SIZE];
int charsPrinted = gtGetLclVarName(lclNum, buf, ArrLen(buf));
if (charsPrinted < 0)
{
return;
}
printf("%s", buf);
if (padForBiggestDisp && (charsPrinted < (int)LONGEST_COMMON_LCL_VAR_DISPLAY_LENGTH))
{
printf("%*c", LONGEST_COMMON_LCL_VAR_DISPLAY_LENGTH - charsPrinted, ' ');
}
}
//------------------------------------------------------------------------
// gtDispLclVarStructType: Print size and type information about a struct or lclBlk local variable.
//
// Arguments:
// lclNum - The local var id.
//
void Compiler::gtDispLclVarStructType(unsigned lclNum)
{
LclVarDsc* varDsc = lvaGetDesc(lclNum);
var_types type = varDsc->TypeGet();
if (type == TYP_STRUCT)
{
ClassLayout* layout = varDsc->GetLayout();
assert(layout != nullptr);
gtDispClassLayout(layout, type);
}
else if (type == TYP_LCLBLK)
{
#if FEATURE_FIXED_OUT_ARGS
assert(lclNum == lvaOutgoingArgSpaceVar);
// Since lvaOutgoingArgSpaceSize is a PhasedVar, we can't read it for dumping
// until it has been set to something.
if (lvaOutgoingArgSpaceSize.HasFinalValue())
{
// A PhasedVar<T> can't be directly used as an arg to a variadic function
unsigned value = lvaOutgoingArgSpaceSize;
printf("<%u> ", value);
}
else
{
printf("<na> "); // The value hasn't yet been determined
}
#else
assert(!"Unknown size");
NO_WAY("Target doesn't support TYP_LCLBLK");
#endif // FEATURE_FIXED_OUT_ARGS
}
}
//------------------------------------------------------------------------
// gtDispClassLayout: Print size and type information about a layout.
//
// Arguments:
// layout - the layout;
// type - variable type, used to avoid printing size for SIMD nodes.
//
void Compiler::gtDispClassLayout(ClassLayout* layout, var_types type)
{
assert(layout != nullptr);
if (layout->IsBlockLayout())
{
printf("<%u>", layout->GetSize());
}
else if (varTypeIsSIMD(type))
{
printf("<%s>", layout->GetClassName());
}
else
{
printf("<%s, %u>", layout->GetClassName(), layout->GetSize());
}
}
/*****************************************************************************/
void Compiler::gtDispConst(GenTree* tree)
{
assert(tree->OperIsConst());
switch (tree->gtOper)
{
case GT_CNS_INT:
if (tree->IsIconHandle(GTF_ICON_STR_HDL))
{
const WCHAR* str = eeGetCPString(tree->AsIntCon()->gtIconVal);
// Only print the string's contents if we got back a non-null, non-empty string.
if ((str != nullptr) && (*str != '\0'))
{
printf(" 0x%X \"%S\"", dspPtr(tree->AsIntCon()->gtIconVal), str);
}
else // We can't print the value of the string
{
// Note that eeGetCPString isn't currently implemented on Linux/ARM
// and instead always returns nullptr
printf(" 0x%X [ICON_STR_HDL]", dspPtr(tree->AsIntCon()->gtIconVal));
}
}
else
{
ssize_t dspIconVal =
tree->IsIconHandle() ? dspPtr(tree->AsIntCon()->gtIconVal) : tree->AsIntCon()->gtIconVal;
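// Handle values are displayed via dspPtr; the branches below pick decimal or hex
// formatting based on the constant's magnitude and sign.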
if (tree->TypeGet() == TYP_REF)
{
assert(tree->AsIntCon()->gtIconVal == 0);
printf(" null");
}
else if ((tree->AsIntCon()->gtIconVal > -1000) && (tree->AsIntCon()->gtIconVal < 1000))
{
printf(" %ld", dspIconVal);
}
#ifdef TARGET_64BIT
else if ((tree->AsIntCon()->gtIconVal & 0xFFFFFFFF00000000LL) != 0)
{
if (dspIconVal >= 0)
{
printf(" 0x%llx", dspIconVal);
}
else
{
printf(" -0x%llx", -dspIconVal);
}
}
#endif
else
{
if (dspIconVal >= 0)
{
printf(" 0x%X", dspIconVal);
}
else
{
printf(" -0x%X", -dspIconVal);
}
}
if (tree->IsIconHandle())
{
switch (tree->GetIconHandleFlag())
{
case GTF_ICON_SCOPE_HDL:
printf(" scope");
break;
case GTF_ICON_CLASS_HDL:
printf(" class");
break;
case GTF_ICON_METHOD_HDL:
printf(" method");
break;
case GTF_ICON_FIELD_HDL:
printf(" field");
break;
case GTF_ICON_STATIC_HDL:
printf(" static");
break;
case GTF_ICON_STR_HDL:
unreached(); // This case is handled above
break;
case GTF_ICON_CONST_PTR:
printf(" const ptr");
break;
case GTF_ICON_GLOBAL_PTR:
printf(" global ptr");
break;
case GTF_ICON_VARG_HDL:
printf(" vararg");
break;
case GTF_ICON_PINVKI_HDL:
printf(" pinvoke");
break;
case GTF_ICON_TOKEN_HDL:
printf(" token");
break;
case GTF_ICON_TLS_HDL:
printf(" tls");
break;
case GTF_ICON_FTN_ADDR:
printf(" ftn");
break;
case GTF_ICON_CIDMID_HDL:
printf(" cid/mid");
break;
case GTF_ICON_BBC_PTR:
printf(" bbc");
break;
case GTF_ICON_STATIC_BOX_PTR:
printf(" static box ptr");
break;
default:
printf(" UNKNOWN");
break;
}
}
if ((tree->gtFlags & GTF_ICON_FIELD_OFF) != 0)
{
printf(" field offset");
}
#ifdef FEATURE_SIMD
if ((tree->gtFlags & GTF_ICON_SIMD_COUNT) != 0)
{
printf(" vector element count");
}
#endif
if ((tree->IsReuseRegVal()) != 0)
{
printf(" reuse reg val");
}
}
gtDispFieldSeq(tree->AsIntCon()->gtFieldSeq);
break;
case GT_CNS_LNG:
printf(" 0x%016I64x", tree->AsLngCon()->gtLconVal);
break;
case GT_CNS_DBL:
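// A double whose raw bits are 0x8000000000000000 is negative zero; print it specially.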
if (*((__int64*)&tree->AsDblCon()->gtDconVal) == (__int64)I64(0x8000000000000000))
{
printf(" -0.00000");
}
else
{
printf(" %#.17g", tree->AsDblCon()->gtDconVal);
}
break;
case GT_CNS_STR:
printf("<string constant>");
break;
default:
assert(!"unexpected constant node");
}
}
//------------------------------------------------------------------------
// gtDispFieldSeq: "gtDispFieldSeq" that also prints "<NotAField>".
//
// Useful for printing zero-offset field sequences.
//
void Compiler::gtDispAnyFieldSeq(FieldSeqNode* fieldSeq)
{
if (fieldSeq == FieldSeqStore::NotAField())
{
printf(" Fseq<NotAField>");
return;
}
gtDispFieldSeq(fieldSeq);
}
//------------------------------------------------------------------------
// gtDispFieldSeq: Print out the fields in this field sequence.
//
void Compiler::gtDispFieldSeq(FieldSeqNode* pfsn)
{
if ((pfsn == nullptr) || (pfsn == FieldSeqStore::NotAField()))
{
return;
}
// Otherwise...
printf(" Fseq[");
while (pfsn != nullptr)
{
assert(pfsn != FieldSeqStore::NotAField()); // Can't exist in a field sequence list except alone
CORINFO_FIELD_HANDLE fldHnd = pfsn->m_fieldHnd;
// First check the "pseudo" field handles...
if (fldHnd == FieldSeqStore::FirstElemPseudoField)
{
printf("#FirstElem");
}
else if (fldHnd == FieldSeqStore::ConstantIndexPseudoField)
{
printf("#ConstantIndex");
}
else
{
printf("%s", eeGetFieldName(fldHnd));
}
pfsn = pfsn->m_next;
if (pfsn != nullptr)
{
printf(", ");
}
}
printf("]");
}
//------------------------------------------------------------------------
// gtDispLeaf: Print a single leaf node to jitstdout.
//
// Arguments:
// tree - the tree to be printed
// indentStack - the specification for the current level of indentation & arcs
//
// Return Value:
// None.
//
// Notes:
// 'indentStack' may be null, in which case no indentation or arcs are printed
void Compiler::gtDispLeaf(GenTree* tree, IndentStack* indentStack)
{
if (tree->OperIsConst())
{
gtDispConst(tree);
return;
}
bool isLclFld = false;
switch (tree->gtOper)
{
case GT_LCL_FLD:
case GT_LCL_FLD_ADDR:
case GT_STORE_LCL_FLD:
isLclFld = true;
FALLTHROUGH;
case GT_PHI_ARG:
case GT_LCL_VAR:
case GT_LCL_VAR_ADDR:
case GT_STORE_LCL_VAR:
{
printf(" ");
const unsigned varNum = tree->AsLclVarCommon()->GetLclNum();
const LclVarDsc* varDsc = lvaGetDesc(varNum);
gtDispLclVar(varNum);
if (tree->AsLclVarCommon()->HasSsaName())
{
if (tree->gtFlags & GTF_VAR_USEASG)
{
assert(tree->gtFlags & GTF_VAR_DEF);
printf("ud:%d->%d", tree->AsLclVarCommon()->GetSsaNum(), GetSsaNumForLocalVarDef(tree));
}
else
{
printf("%s:%d", (tree->gtFlags & GTF_VAR_DEF) ? "d" : "u", tree->AsLclVarCommon()->GetSsaNum());
}
}
if (isLclFld)
{
printf("[+%u]", tree->AsLclFld()->GetLclOffs());
gtDispFieldSeq(tree->AsLclFld()->GetFieldSeq());
}
if (varDsc->lvRegister)
{
printf(" ");
varDsc->PrintVarReg();
}
else if (tree->InReg())
{
printf(" %s", compRegVarName(tree->GetRegNum()));
}
if (varDsc->lvPromoted)
{
if (!varTypeIsPromotable(varDsc) && !varDsc->lvUnusedStruct)
{
// Promoted implicit byrefs can get in this state while they are being rewritten
// in global morph.
}
else
{
for (unsigned i = varDsc->lvFieldLclStart; i < varDsc->lvFieldLclStart + varDsc->lvFieldCnt; ++i)
{
LclVarDsc* fieldVarDsc = lvaGetDesc(i);
const char* fieldName;
#if !defined(TARGET_64BIT)
if (varTypeIsLong(varDsc))
{
fieldName = (i == 0) ? "lo" : "hi";
}
else
#endif // !defined(TARGET_64BIT)
{
CORINFO_CLASS_HANDLE typeHnd = varDsc->GetStructHnd();
CORINFO_FIELD_HANDLE fldHnd =
info.compCompHnd->getFieldInClass(typeHnd, fieldVarDsc->lvFldOrdinal);
fieldName = eeGetFieldName(fldHnd);
}
printf("\n");
printf(" ");
printIndent(indentStack);
printf(" %-6s V%02u.%s (offs=0x%02x) -> ", varTypeName(fieldVarDsc->TypeGet()),
tree->AsLclVarCommon()->GetLclNum(), fieldName, fieldVarDsc->lvFldOffset);
gtDispLclVar(i);
if (fieldVarDsc->lvRegister)
{
printf(" ");
fieldVarDsc->PrintVarReg();
}
if (fieldVarDsc->lvTracked && fgLocalVarLivenessDone && tree->IsMultiRegLclVar() &&
tree->AsLclVar()->IsLastUse(i - varDsc->lvFieldLclStart))
{
printf(" (last use)");
}
}
}
}
else // a normal not-promoted lclvar
{
if (varDsc->lvTracked && fgLocalVarLivenessDone && ((tree->gtFlags & GTF_VAR_DEATH) != 0))
{
printf(" (last use)");
}
}
}
break;
case GT_JMP:
{
const char* methodName;
const char* className;
methodName = eeGetMethodName((CORINFO_METHOD_HANDLE)tree->AsVal()->gtVal1, &className);
printf(" %s.%s\n", className, methodName);
}
break;
case GT_CLS_VAR:
printf(" Hnd=%#x", dspPtr(tree->AsClsVar()->gtClsVarHnd));
gtDispFieldSeq(tree->AsClsVar()->gtFieldSeq);
break;
case GT_CLS_VAR_ADDR:
printf(" Hnd=%#x", dspPtr(tree->AsClsVar()->gtClsVarHnd));
break;
case GT_LABEL:
break;
case GT_FTN_ADDR:
{
const char* methodName;
const char* className;
methodName = eeGetMethodName((CORINFO_METHOD_HANDLE)tree->AsFptrVal()->gtFptrMethod, &className);
printf(" %s.%s\n", className, methodName);
}
break;
#if !defined(FEATURE_EH_FUNCLETS)
case GT_END_LFIN:
printf(" endNstLvl=%d", tree->AsVal()->gtVal1);
break;
#endif // !FEATURE_EH_FUNCLETS
// Vanilla leaves. No qualifying information available, so do nothing.
case GT_NO_OP:
case GT_START_NONGC:
case GT_START_PREEMPTGC:
case GT_PROF_HOOK:
case GT_CATCH_ARG:
case GT_MEMORYBARRIER:
case GT_ARGPLACE:
case GT_PINVOKE_PROLOG:
case GT_JMPTABLE:
break;
case GT_RET_EXPR:
{
GenTree* const associatedTree = tree->AsRetExpr()->gtInlineCandidate;
printf("(inl return %s ", tree->IsCall() ? " from call" : "expr");
printTreeID(associatedTree);
printf(")");
}
break;
case GT_PHYSREG:
printf(" %s", getRegName(tree->AsPhysReg()->gtSrcReg));
break;
case GT_IL_OFFSET:
printf(" ");
tree->AsILOffset()->gtStmtDI.Dump(true);
break;
case GT_JCC:
case GT_SETCC:
printf(" cond=%s", tree->AsCC()->gtCondition.Name());
break;
case GT_JCMP:
printf(" cond=%s%s", (tree->gtFlags & GTF_JCMP_TST) ? "TEST_" : "",
(tree->gtFlags & GTF_JCMP_EQ) ? "EQ" : "NE");
break;
default:
assert(!"don't know how to display tree leaf node");
}
}
//------------------------------------------------------------------------
// gtDispChild: Print a child node to jitstdout.
//
// Arguments:
// tree - the tree to be printed
// indentStack - the specification for the current level of indentation & arcs
// arcType - the type of arc to use for this child
// msg - a contextual method (i.e. from the parent) to print
// topOnly - a boolean indicating whether to print the children, or just the top node
//
// Return Value:
// None.
//
// Notes:
// 'indentStack' may be null, in which case no indentation or arcs are printed
// 'msg' has a default value of null
// 'topOnly' is an optional argument that defaults to false
void Compiler::gtDispChild(GenTree* child,
IndentStack* indentStack,
IndentInfo arcType,
_In_opt_ const char* msg, /* = nullptr */
bool topOnly) /* = false */
{
indentStack->Push(arcType);
gtDispTree(child, indentStack, msg, topOnly);
indentStack->Pop();
}
#ifdef FEATURE_SIMD
// Intrinsic Id to name map
extern const char* const simdIntrinsicNames[] = {
#define SIMD_INTRINSIC(mname, inst, id, name, r, ac, arg1, arg2, arg3, t1, t2, t3, t4, t5, t6, t7, t8, t9, t10) name,
#include "simdintrinsiclist.h"
};
#endif // FEATURE_SIMD
/*****************************************************************************/
void Compiler::gtDispTree(GenTree* tree,
IndentStack* indentStack, /* = nullptr */
_In_ _In_opt_z_ const char* msg, /* = nullptr */
bool topOnly, /* = false */
bool isLIR) /* = false */
{
if (tree == nullptr)
{
printf(" [%08X] <NULL>\n", tree);
printf(""); // null string means flush
return;
}
if (indentStack == nullptr)
{
indentStack = new (this, CMK_DebugOnly) IndentStack(this);
}
if (IsUninitialized(tree))
{
/* Value used to initialize nodes */
printf("Uninitialized tree node!\n");
return;
}
if (tree->gtOper >= GT_COUNT)
{
gtDispNode(tree, indentStack, msg, isLIR);
printf("Bogus operator!\n");
return;
}
/* Is tree a leaf node? */
if (tree->OperIsLeaf() || tree->OperIsLocalStore()) // local stores used to be leaves
{
gtDispNode(tree, indentStack, msg, isLIR);
gtDispLeaf(tree, indentStack);
gtDispCommonEndLine(tree);
if (tree->OperIsLocalStore() && !topOnly)
{
gtDispChild(tree->AsOp()->gtOp1, indentStack, IINone);
}
return;
}
// Determine what kind of arc to propagate.
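// 'myArc' is the connector drawn beside this node itself; 'lowerArc' is what gets
// pushed on the stack while its children are displayed.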
IndentInfo myArc = IINone;
IndentInfo lowerArc = IINone;
if (indentStack->Depth() > 0)
{
myArc = indentStack->Pop();
switch (myArc)
{
case IIArcBottom:
indentStack->Push(IIArc);
lowerArc = IINone;
break;
case IIArc:
indentStack->Push(IIArc);
lowerArc = IIArc;
break;
case IIArcTop:
indentStack->Push(IINone);
lowerArc = IIArc;
break;
case IINone:
indentStack->Push(IINone);
lowerArc = IINone;
break;
default:
unreached();
break;
}
}
/* Is it a 'simple' unary/binary operator? */
const char* childMsg = nullptr;
if (tree->OperIsSimple())
{
// Now, get the right type of arc for this node
if (myArc != IINone)
{
indentStack->Pop();
indentStack->Push(myArc);
}
gtDispNode(tree, indentStack, msg, isLIR);
// Propagate lowerArc to the lower children.
if (indentStack->Depth() > 0)
{
(void)indentStack->Pop();
indentStack->Push(lowerArc);
}
if (tree->gtOper == GT_CAST)
{
/* Format a message that explains the effect of this GT_CAST */
var_types fromType = genActualType(tree->AsCast()->CastOp()->TypeGet());
var_types toType = tree->CastToType();
var_types finalType = tree->TypeGet();
/* if GTF_UNSIGNED is set then force fromType to an unsigned type */
if (tree->gtFlags & GTF_UNSIGNED)
{
fromType = varTypeToUnsigned(fromType);
}
if (finalType != toType)
{
printf(" %s <-", varTypeName(finalType));
}
printf(" %s <- %s", varTypeName(toType), varTypeName(fromType));
}
if (tree->OperIsBlkOp())
{
if (tree->OperIsCopyBlkOp())
{
printf(" (copy)");
}
else if (tree->OperIsInitBlkOp())
{
printf(" (init)");
}
if (tree->OperIsStoreBlk() && (tree->AsBlk()->gtBlkOpKind != GenTreeBlk::BlkOpKindInvalid))
{
switch (tree->AsBlk()->gtBlkOpKind)
{
#ifdef TARGET_XARCH
case GenTreeBlk::BlkOpKindRepInstr:
printf(" (RepInstr)");
break;
#endif
case GenTreeBlk::BlkOpKindUnroll:
printf(" (Unroll)");
break;
#ifndef TARGET_X86
case GenTreeBlk::BlkOpKindHelper:
printf(" (Helper)");
break;
#endif
default:
unreached();
}
}
}
#if FEATURE_PUT_STRUCT_ARG_STK
else if (tree->OperGet() == GT_PUTARG_STK)
{
const GenTreePutArgStk* putArg = tree->AsPutArgStk();
#if !defined(DEBUG_ARG_SLOTS)
printf(" (%d stackByteSize), (%d byteOffset)", putArg->GetStackByteSize(), putArg->getArgOffset());
#else
if (compMacOsArm64Abi())
{
printf(" (%d stackByteSize), (%d byteOffset)", putArg->GetStackByteSize(), putArg->getArgOffset());
}
else
{
printf(" (%d slots), (%d stackByteSize), (%d slot), (%d byteOffset)", putArg->gtNumSlots,
putArg->GetStackByteSize(), putArg->gtSlotNum, putArg->getArgOffset());
}
#endif
if (putArg->gtPutArgStkKind != GenTreePutArgStk::Kind::Invalid)
{
switch (putArg->gtPutArgStkKind)
{
case GenTreePutArgStk::Kind::RepInstr:
printf(" (RepInstr)");
break;
case GenTreePutArgStk::Kind::PartialRepInstr:
printf(" (PartialRepInstr)");
break;
case GenTreePutArgStk::Kind::Unroll:
printf(" (Unroll)");
break;
case GenTreePutArgStk::Kind::Push:
printf(" (Push)");
break;
case GenTreePutArgStk::Kind::PushAllSlots:
printf(" (PushAllSlots)");
break;
default:
unreached();
}
}
}
#if FEATURE_ARG_SPLIT
else if (tree->OperGet() == GT_PUTARG_SPLIT)
{
const GenTreePutArgSplit* putArg = tree->AsPutArgSplit();
#if !defined(DEBUG_ARG_SLOTS)
printf(" (%d stackByteSize), (%d numRegs)", putArg->GetStackByteSize(), putArg->gtNumRegs);
#else
if (compMacOsArm64Abi())
{
printf(" (%d stackByteSize), (%d numRegs)", putArg->GetStackByteSize(), putArg->gtNumRegs);
}
else
{
printf(" (%d slots), (%d stackByteSize), (%d numRegs)", putArg->gtNumSlots, putArg->GetStackByteSize(),
putArg->gtNumRegs);
}
#endif
}
#endif // FEATURE_ARG_SPLIT
#endif // FEATURE_PUT_STRUCT_ARG_STK
if (tree->OperIs(GT_FIELD))
{
if (FieldSeqStore::IsPseudoField(tree->AsField()->gtFldHnd))
{
printf(" #PseudoField:0x%x", tree->AsField()->gtFldOffset);
}
else
{
printf(" %s", eeGetFieldName(tree->AsField()->gtFldHnd), 0);
}
}
if (tree->gtOper == GT_INTRINSIC)
{
GenTreeIntrinsic* intrinsic = tree->AsIntrinsic();
switch (intrinsic->gtIntrinsicName)
{
case NI_System_Math_Abs:
printf(" abs");
break;
case NI_System_Math_Acos:
printf(" acos");
break;
case NI_System_Math_Acosh:
printf(" acosh");
break;
case NI_System_Math_Asin:
printf(" asin");
break;
case NI_System_Math_Asinh:
printf(" asinh");
break;
case NI_System_Math_Atan:
printf(" atan");
break;
case NI_System_Math_Atanh:
printf(" atanh");
break;
case NI_System_Math_Atan2:
printf(" atan2");
break;
case NI_System_Math_Cbrt:
printf(" cbrt");
break;
case NI_System_Math_Ceiling:
printf(" ceiling");
break;
case NI_System_Math_Cos:
printf(" cos");
break;
case NI_System_Math_Cosh:
printf(" cosh");
break;
case NI_System_Math_Exp:
printf(" exp");
break;
case NI_System_Math_Floor:
printf(" floor");
break;
case NI_System_Math_FMod:
printf(" fmod");
break;
case NI_System_Math_FusedMultiplyAdd:
printf(" fma");
break;
case NI_System_Math_ILogB:
printf(" ilogb");
break;
case NI_System_Math_Log:
printf(" log");
break;
case NI_System_Math_Log2:
printf(" log2");
break;
case NI_System_Math_Log10:
printf(" log10");
break;
case NI_System_Math_Max:
printf(" max");
break;
case NI_System_Math_Min:
printf(" min");
break;
case NI_System_Math_Pow:
printf(" pow");
break;
case NI_System_Math_Round:
printf(" round");
break;
case NI_System_Math_Sin:
printf(" sin");
break;
case NI_System_Math_Sinh:
printf(" sinh");
break;
case NI_System_Math_Sqrt:
printf(" sqrt");
break;
case NI_System_Math_Tan:
printf(" tan");
break;
case NI_System_Math_Tanh:
printf(" tanh");
break;
case NI_System_Math_Truncate:
printf(" truncate");
break;
case NI_System_Object_GetType:
printf(" objGetType");
break;
case NI_System_Runtime_CompilerServices_RuntimeHelpers_IsKnownConstant:
printf(" isKnownConst");
break;
default:
unreached();
}
}
gtDispCommonEndLine(tree);
if (!topOnly)
{
if (tree->AsOp()->gtOp1 != nullptr)
{
// Label the child of the GT_COLON operator
// op1 is the else part
if (tree->gtOper == GT_COLON)
{
childMsg = "else";
}
else if (tree->gtOper == GT_QMARK)
{
childMsg = " if";
}
gtDispChild(tree->AsOp()->gtOp1, indentStack,
(tree->gtGetOp2IfPresent() == nullptr) ? IIArcBottom : IIArc, childMsg, topOnly);
}
if (tree->gtGetOp2IfPresent())
{
// Label the childMsgs of the GT_COLON operator
// op2 is the then part
if (tree->gtOper == GT_COLON)
{
childMsg = "then";
}
gtDispChild(tree->AsOp()->gtOp2, indentStack, IIArcBottom, childMsg, topOnly);
}
}
return;
}
// Now, get the right type of arc for this node
if (myArc != IINone)
{
indentStack->Pop();
indentStack->Push(myArc);
}
gtDispNode(tree, indentStack, msg, isLIR);
// Propagate lowerArc to the lower children.
if (indentStack->Depth() > 0)
{
(void)indentStack->Pop();
indentStack->Push(lowerArc);
}
// See what kind of a special operator we have here, and handle its special children.
switch (tree->gtOper)
{
case GT_FIELD_LIST:
gtDispCommonEndLine(tree);
if (!topOnly)
{
for (GenTreeFieldList::Use& use : tree->AsFieldList()->Uses())
{
char offset[32];
sprintf_s(offset, sizeof(offset), "ofs %u", use.GetOffset());
gtDispChild(use.GetNode(), indentStack, (use.GetNext() == nullptr) ? IIArcBottom : IIArc, offset);
}
}
break;
case GT_PHI:
gtDispCommonEndLine(tree);
if (!topOnly)
{
for (GenTreePhi::Use& use : tree->AsPhi()->Uses())
{
char block[32];
sprintf_s(block, sizeof(block), "pred " FMT_BB, use.GetNode()->AsPhiArg()->gtPredBB->bbNum);
gtDispChild(use.GetNode(), indentStack, (use.GetNext() == nullptr) ? IIArcBottom : IIArc, block);
}
}
break;
case GT_CALL:
{
GenTreeCall* call = tree->AsCall();
GenTree* lastChild = nullptr;
call->VisitOperands([&lastChild](GenTree* operand) -> GenTree::VisitResult {
lastChild = operand;
return GenTree::VisitResult::Continue;
});
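// 'lastChild' is the call's final operand; it is the one drawn with a bottom arc below.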
if (call->gtCallType != CT_INDIRECT)
{
const char* methodName;
const char* className;
methodName = eeGetMethodName(call->gtCallMethHnd, &className);
printf(" %s.%s", className, methodName);
}
if ((call->gtFlags & GTF_CALL_UNMANAGED) && (call->gtCallMoreFlags & GTF_CALL_M_FRAME_VAR_DEATH))
{
printf(" (FramesRoot last use)");
}
if (((call->gtFlags & GTF_CALL_INLINE_CANDIDATE) != 0) && (call->gtInlineCandidateInfo != nullptr) &&
(call->gtInlineCandidateInfo->exactContextHnd != nullptr))
{
printf(" (exactContextHnd=0x%p)", dspPtr(call->gtInlineCandidateInfo->exactContextHnd));
}
gtDispCommonEndLine(tree);
if (!topOnly)
{
char buf[64];
char* bufp;
bufp = &buf[0];
if ((call->gtCallThisArg != nullptr) && !call->gtCallThisArg->GetNode()->OperIs(GT_NOP, GT_ARGPLACE))
{
if (call->gtCallThisArg->GetNode()->OperIs(GT_ASG))
{
sprintf_s(bufp, sizeof(buf), "this SETUP%c", 0);
}
else
{
sprintf_s(bufp, sizeof(buf), "this in %s%c", compRegVarName(REG_ARG_0), 0);
}
gtDispChild(call->gtCallThisArg->GetNode(), indentStack,
(call->gtCallThisArg->GetNode() == lastChild) ? IIArcBottom : IIArc, bufp, topOnly);
}
if (call->gtCallArgs)
{
gtDispArgList(call, lastChild, indentStack);
}
if (call->gtCallType == CT_INDIRECT)
{
gtDispChild(call->gtCallAddr, indentStack, (call->gtCallAddr == lastChild) ? IIArcBottom : IIArc,
"calli tgt", topOnly);
}
if (call->gtControlExpr != nullptr)
{
gtDispChild(call->gtControlExpr, indentStack,
(call->gtControlExpr == lastChild) ? IIArcBottom : IIArc, "control expr", topOnly);
}
int lateArgIndex = 0;
for (GenTreeCall::Use& use : call->LateArgs())
{
IndentInfo arcType = (use.GetNext() == nullptr) ? IIArcBottom : IIArc;
gtGetLateArgMsg(call, use.GetNode(), lateArgIndex, bufp, sizeof(buf));
gtDispChild(use.GetNode(), indentStack, arcType, bufp, topOnly);
lateArgIndex++;
}
}
}
break;
#if defined(FEATURE_SIMD) || defined(FEATURE_HW_INTRINSICS)
#if defined(FEATURE_SIMD)
case GT_SIMD:
#endif
#if defined(FEATURE_HW_INTRINSICS)
case GT_HWINTRINSIC:
#endif
#if defined(FEATURE_SIMD)
if (tree->OperIs(GT_SIMD))
{
printf(" %s %s", varTypeName(tree->AsSIMD()->GetSimdBaseType()),
simdIntrinsicNames[tree->AsSIMD()->GetSIMDIntrinsicId()]);
}
#endif // defined(FEATURE_SIMD)
#if defined(FEATURE_HW_INTRINSICS)
if (tree->OperIs(GT_HWINTRINSIC))
{
printf(" %s %s", tree->AsHWIntrinsic()->GetSimdBaseType() == TYP_UNKNOWN
? ""
: varTypeName(tree->AsHWIntrinsic()->GetSimdBaseType()),
HWIntrinsicInfo::lookupName(tree->AsHWIntrinsic()->GetHWIntrinsicId()));
}
#endif // defined(FEATURE_HW_INTRINSICS)
gtDispCommonEndLine(tree);
if (!topOnly)
{
size_t index = 0;
size_t count = tree->AsMultiOp()->GetOperandCount();
for (GenTree* operand : tree->AsMultiOp()->Operands())
{
gtDispChild(operand, indentStack, ++index < count ? IIArc : IIArcBottom, nullptr, topOnly);
}
}
break;
#endif // defined(FEATURE_SIMD) || defined(FEATURE_HW_INTRINSICS)
case GT_ARR_ELEM:
gtDispCommonEndLine(tree);
if (!topOnly)
{
gtDispChild(tree->AsArrElem()->gtArrObj, indentStack, IIArc, nullptr, topOnly);
unsigned dim;
for (dim = 0; dim < tree->AsArrElem()->gtArrRank; dim++)
{
IndentInfo arcType = ((dim + 1) == tree->AsArrElem()->gtArrRank) ? IIArcBottom : IIArc;
gtDispChild(tree->AsArrElem()->gtArrInds[dim], indentStack, arcType, nullptr, topOnly);
}
}
break;
case GT_ARR_OFFSET:
gtDispCommonEndLine(tree);
if (!topOnly)
{
gtDispChild(tree->AsArrOffs()->gtOffset, indentStack, IIArc, nullptr, topOnly);
gtDispChild(tree->AsArrOffs()->gtIndex, indentStack, IIArc, nullptr, topOnly);
gtDispChild(tree->AsArrOffs()->gtArrObj, indentStack, IIArcBottom, nullptr, topOnly);
}
break;
case GT_CMPXCHG:
gtDispCommonEndLine(tree);
if (!topOnly)
{
gtDispChild(tree->AsCmpXchg()->gtOpLocation, indentStack, IIArc, nullptr, topOnly);
gtDispChild(tree->AsCmpXchg()->gtOpValue, indentStack, IIArc, nullptr, topOnly);
gtDispChild(tree->AsCmpXchg()->gtOpComparand, indentStack, IIArcBottom, nullptr, topOnly);
}
break;
case GT_STORE_DYN_BLK:
if (tree->OperIsCopyBlkOp())
{
printf(" (copy)");
}
else if (tree->OperIsInitBlkOp())
{
printf(" (init)");
}
gtDispCommonEndLine(tree);
if (!topOnly)
{
gtDispChild(tree->AsStoreDynBlk()->Addr(), indentStack, IIArc, nullptr, topOnly);
if (tree->AsStoreDynBlk()->Data() != nullptr)
{
gtDispChild(tree->AsStoreDynBlk()->Data(), indentStack, IIArc, nullptr, topOnly);
}
gtDispChild(tree->AsStoreDynBlk()->gtDynamicSize, indentStack, IIArcBottom, nullptr, topOnly);
}
break;
default:
printf("<DON'T KNOW HOW TO DISPLAY THIS NODE> :");
printf(""); // null string means flush
break;
}
}
//------------------------------------------------------------------------
// gtGetArgMsg: Construct a message about the given argument
//
// Arguments:
// call - The call for which 'arg' is an argument
// arg - The argument for which a message should be constructed
// argNum - The ordinal number of the arg in the argument list
// bufp - A pointer to the buffer into which the message is written
// bufLength - The length of the buffer pointed to by bufp
//
// Return Value:
// No return value, but bufp is written.
//
// Assumptions:
// 'call' must be a call node
// 'arg' must be an argument to 'call' (else gtArgEntryByNode will assert)
void Compiler::gtGetArgMsg(GenTreeCall* call, GenTree* arg, unsigned argNum, char* bufp, unsigned bufLength)
{
if (call->gtCallLateArgs != nullptr)
{
fgArgTabEntry* curArgTabEntry = gtArgEntryByArgNum(call, argNum);
assert(curArgTabEntry);
if (arg->gtFlags & GTF_LATE_ARG)
{
sprintf_s(bufp, bufLength, "arg%d SETUP%c", argNum, 0);
}
else
{
#ifdef TARGET_ARM
if (curArgTabEntry->IsSplit())
{
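// A split argument is passed partly in registers and partly in the outgoing arg
// area; show the register (range) and the stack offset it continues at.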
regNumber firstReg = curArgTabEntry->GetRegNum();
if (curArgTabEntry->numRegs == 1)
{
sprintf_s(bufp, bufLength, "arg%d %s out+%02x%c", argNum, compRegVarName(firstReg),
(curArgTabEntry->slotNum) * TARGET_POINTER_SIZE, 0);
}
else
{
regNumber lastReg = REG_STK;
char separator = (curArgTabEntry->numRegs == 2) ? ',' : '-';
if (curArgTabEntry->IsHfaRegArg())
{
unsigned lastRegNum = genMapFloatRegNumToRegArgNum(firstReg) + curArgTabEntry->numRegs - 1;
lastReg = genMapFloatRegArgNumToRegNum(lastRegNum);
}
else
{
unsigned lastRegNum = genMapIntRegNumToRegArgNum(firstReg) + curArgTabEntry->numRegs - 1;
lastReg = genMapIntRegArgNumToRegNum(lastRegNum);
}
sprintf_s(bufp, bufLength, "arg%d %s%c%s out+%02x%c", argNum, compRegVarName(firstReg), separator,
compRegVarName(lastReg), (curArgTabEntry->slotNum) * TARGET_POINTER_SIZE, 0);
}
return;
}
#endif // TARGET_ARM
#if FEATURE_FIXED_OUT_ARGS
sprintf_s(bufp, bufLength, "arg%d out+%02x%c", argNum, curArgTabEntry->GetByteOffset(), 0);
#else
sprintf_s(bufp, bufLength, "arg%d on STK%c", argNum, 0);
#endif
}
}
else
{
sprintf_s(bufp, bufLength, "arg%d%c", argNum, 0);
}
}
//------------------------------------------------------------------------
// gtGetLateArgMsg: Construct a message about the given argument
//
// Arguments:
// call - The call for which 'arg' is an argument
// argx - The argument for which a message should be constructed
// lateArgIndex - The ordinal number of the arg in the late-arg list
// bufp - A pointer to the buffer into which the message is written
// bufLength - The length of the buffer pointed to by bufp
//
// Return Value:
// No return value, but bufp is written.
//
// Assumptions:
// 'call' must be a call node
// 'arg' must be an argument to 'call' (else gtArgEntryByNode will assert)
void Compiler::gtGetLateArgMsg(GenTreeCall* call, GenTree* argx, int lateArgIndex, char* bufp, unsigned bufLength)
{
assert(!argx->IsArgPlaceHolderNode()); // No placeholder nodes are in gtCallLateArgs;
fgArgTabEntry* curArgTabEntry = gtArgEntryByLateArgIndex(call, lateArgIndex);
assert(curArgTabEntry);
regNumber argReg = curArgTabEntry->GetRegNum();
#if FEATURE_FIXED_OUT_ARGS
if (argReg == REG_STK)
{
sprintf_s(bufp, bufLength, "arg%d in out+%02x%c", curArgTabEntry->argNum, curArgTabEntry->GetByteOffset(), 0);
}
else
#endif
{
if (curArgTabEntry->use == call->gtCallThisArg)
{
sprintf_s(bufp, bufLength, "this in %s%c", compRegVarName(argReg), 0);
}
#ifdef TARGET_ARM
else if (curArgTabEntry->IsSplit())
{
regNumber firstReg = curArgTabEntry->GetRegNum();
unsigned argNum = curArgTabEntry->argNum;
if (curArgTabEntry->numRegs == 1)
{
sprintf_s(bufp, bufLength, "arg%d %s out+%02x%c", argNum, compRegVarName(firstReg),
(curArgTabEntry->slotNum) * TARGET_POINTER_SIZE, 0);
}
else
{
regNumber lastReg = REG_STK;
char separator = (curArgTabEntry->numRegs == 2) ? ',' : '-';
if (curArgTabEntry->IsHfaRegArg())
{
unsigned lastRegNum = genMapFloatRegNumToRegArgNum(firstReg) + curArgTabEntry->numRegs - 1;
lastReg = genMapFloatRegArgNumToRegNum(lastRegNum);
}
else
{
unsigned lastRegNum = genMapIntRegNumToRegArgNum(firstReg) + curArgTabEntry->numRegs - 1;
lastReg = genMapIntRegArgNumToRegNum(lastRegNum);
}
sprintf_s(bufp, bufLength, "arg%d %s%c%s out+%02x%c", argNum, compRegVarName(firstReg), separator,
compRegVarName(lastReg), (curArgTabEntry->slotNum) * TARGET_POINTER_SIZE, 0);
}
return;
}
#endif // TARGET_ARM
else
{
#if FEATURE_MULTIREG_ARGS
if (curArgTabEntry->numRegs >= 2)
{
char separator = (curArgTabEntry->numRegs == 2) ? ',' : '-';
sprintf_s(bufp, bufLength, "arg%d %s%c%s%c", curArgTabEntry->argNum, compRegVarName(argReg), separator,
compRegVarName(curArgTabEntry->GetRegNum(curArgTabEntry->numRegs - 1)), 0);
}
else
#endif
{
sprintf_s(bufp, bufLength, "arg%d in %s%c", curArgTabEntry->argNum, compRegVarName(argReg), 0);
}
}
}
}
//------------------------------------------------------------------------
// gtDispArgList: Dump the tree for a call arg list
//
// Arguments:
// call - the call to dump arguments for
// lastCallOperand - the call's last operand (to determine the arc types)
// indentStack - the specification for the current level of indentation & arcs
//
// Return Value:
// None.
//
void Compiler::gtDispArgList(GenTreeCall* call, GenTree* lastCallOperand, IndentStack* indentStack)
{
unsigned argnum = 0;
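// The implicit 'this' argument, when present, occupies position 0, so the explicit
// arguments start numbering from 1.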
if (call->gtCallThisArg != nullptr)
{
argnum++;
}
for (GenTreeCall::Use& use : call->Args())
{
GenTree* argNode = use.GetNode();
if (!argNode->IsNothingNode() && !argNode->IsArgPlaceHolderNode())
{
char buf[256];
gtGetArgMsg(call, argNode, argnum, buf, sizeof(buf));
gtDispChild(argNode, indentStack, (argNode == lastCallOperand) ? IIArcBottom : IIArc, buf, false);
}
argnum++;
}
}
//------------------------------------------------------------------------
// gtDispStmt: Print a statement to jitstdout.
//
// Arguments:
// stmt - the statement to be printed;
// msg - an additional message to print before the statement.
//
void Compiler::gtDispStmt(Statement* stmt, const char* msg /* = nullptr */)
{
if (opts.compDbgInfo)
{
if (msg != nullptr)
{
printf("%s ", msg);
}
printStmtID(stmt);
printf(" ( ");
const DebugInfo& di = stmt->GetDebugInfo();
// For statements in the root we display just the location without the
// inline context info.
if (di.GetInlineContext() == nullptr || di.GetInlineContext()->IsRoot())
{
di.GetLocation().Dump();
}
else
{
stmt->GetDebugInfo().Dump(false);
}
printf(" ... ");
IL_OFFSET lastILOffs = stmt->GetLastILOffset();
if (lastILOffs == BAD_IL_OFFSET)
{
printf("???");
}
else
{
printf("0x%03X", lastILOffs);
}
printf(" )");
DebugInfo par;
if (stmt->GetDebugInfo().GetParent(&par))
{
printf(" <- ");
par.Dump(true);
}
printf("\n");
}
gtDispTree(stmt->GetRootNode());
}
//------------------------------------------------------------------------
// gtDispBlockStmts: dumps all statements inside `block`.
//
// Arguments:
// block - the block to display statements for.
//
void Compiler::gtDispBlockStmts(BasicBlock* block)
{
for (Statement* const stmt : block->Statements())
{
gtDispStmt(stmt);
printf("\n");
}
}
//------------------------------------------------------------------------
// Compiler::gtDispRange: dumps a range of LIR.
//
// Arguments:
// range - the range of LIR to display.
//
void Compiler::gtDispRange(LIR::ReadOnlyRange const& range)
{
for (GenTree* node : range)
{
gtDispLIRNode(node);
}
}
//------------------------------------------------------------------------
// Compiler::gtDispTreeRange: dumps the LIR range that contains all of the
// nodes in the dataflow tree rooted at a given
// node.
//
// Arguments:
// containingRange - the LIR range that contains the root node.
// tree - the root of the dataflow tree.
//
void Compiler::gtDispTreeRange(LIR::Range& containingRange, GenTree* tree)
{
bool unused;
gtDispRange(containingRange.GetTreeRange(tree, &unused));
}
//------------------------------------------------------------------------
// Compiler::gtDispLIRNode: dumps a single LIR node.
//
// Arguments:
// node - the LIR node to dump.
// prefixMsg - an optional prefix for each line of output.
//
void Compiler::gtDispLIRNode(GenTree* node, const char* prefixMsg /* = nullptr */)
{
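// displayOperand prints a single operand line: blank padding where the node header
// normally goes, the operand's arc, then its temp number, type, and message.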
auto displayOperand = [](GenTree* operand, const char* message, IndentInfo operandArc, IndentStack& indentStack,
size_t prefixIndent) {
assert(operand != nullptr);
assert(message != nullptr);
if (prefixIndent != 0)
{
printf("%*s", (int)prefixIndent, "");
}
// 50 spaces for alignment
printf("%-50s", "");
#if FEATURE_SET_FLAGS
// additional flag enlarges the flag field by one character
printf(" ");
#endif
indentStack.Push(operandArc);
indentStack.print();
indentStack.Pop();
operandArc = IIArc;
printf(" t%-5d %-6s %s\n", operand->gtTreeID, varTypeName(operand->TypeGet()), message);
};
IndentStack indentStack(this);
size_t prefixIndent = 0;
if (prefixMsg != nullptr)
{
prefixIndent = strlen(prefixMsg);
}
const int bufLength = 256;
char buf[bufLength];
const bool nodeIsCall = node->IsCall();
// Visit operands
IndentInfo operandArc = IIArcTop;
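// Each operand is printed on its own line above the node; the first gets the top
// arc and the rest a plain arc.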
for (GenTree* operand : node->Operands())
{
if (operand->IsArgPlaceHolderNode() || !operand->IsValue())
{
// Either of these situations may happen with calls.
continue;
}
if (nodeIsCall)
{
GenTreeCall* call = node->AsCall();
if ((call->gtCallThisArg != nullptr) && (operand == call->gtCallThisArg->GetNode()))
{
sprintf_s(buf, sizeof(buf), "this in %s", compRegVarName(REG_ARG_0));
displayOperand(operand, buf, operandArc, indentStack, prefixIndent);
}
else if (operand == call->gtCallAddr)
{
displayOperand(operand, "calli tgt", operandArc, indentStack, prefixIndent);
}
else if (operand == call->gtControlExpr)
{
displayOperand(operand, "control expr", operandArc, indentStack, prefixIndent);
}
else if (operand == call->gtCallCookie)
{
displayOperand(operand, "cookie", operandArc, indentStack, prefixIndent);
}
else
{
fgArgTabEntry* curArgTabEntry = gtArgEntryByNode(call, operand);
assert(curArgTabEntry);
if (!curArgTabEntry->isLateArg())
{
gtGetArgMsg(call, operand, curArgTabEntry->argNum, buf, sizeof(buf));
}
else
{
gtGetLateArgMsg(call, operand, curArgTabEntry->GetLateArgInx(), buf, sizeof(buf));
}
displayOperand(operand, buf, operandArc, indentStack, prefixIndent);
}
}
else if (node->OperIs(GT_STORE_DYN_BLK))
{
if (operand == node->AsBlk()->Addr())
{
displayOperand(operand, "lhs", operandArc, indentStack, prefixIndent);
}
else if (operand == node->AsBlk()->Data())
{
displayOperand(operand, "rhs", operandArc, indentStack, prefixIndent);
}
else
{
assert(operand == node->AsStoreDynBlk()->gtDynamicSize);
displayOperand(operand, "size", operandArc, indentStack, prefixIndent);
}
}
else if (node->OperIs(GT_ASG))
{
if (operand == node->gtGetOp1())
{
displayOperand(operand, "lhs", operandArc, indentStack, prefixIndent);
}
else
{
displayOperand(operand, "rhs", operandArc, indentStack, prefixIndent);
}
}
else
{
displayOperand(operand, "", operandArc, indentStack, prefixIndent);
}
operandArc = IIArc;
}
// Visit the operator
if (prefixMsg != nullptr)
{
printf("%s", prefixMsg);
}
const bool topOnly = true;
const bool isLIR = true;
gtDispTree(node, &indentStack, nullptr, topOnly, isLIR);
}
/*****************************************************************************/
#endif // DEBUG
/*****************************************************************************
*
* Check if the given node can be folded,
* and call the methods to perform the folding
*/
GenTree* Compiler::gtFoldExpr(GenTree* tree)
{
unsigned kind = tree->OperKind();
/* We must have a simple operation to fold */
// If we're in the CSE phase, it's not safe to perform tree folding,
// since it could potentially change the set of considered CSE candidates.
if (optValnumCSE_phase)
{
return tree;
}
if (!(kind & GTK_SMPOP))
{
return tree;
}
GenTree* op1 = tree->AsOp()->gtOp1;
/* Filter out non-foldable trees that can have constant children */
assert(kind & (GTK_UNOP | GTK_BINOP));
switch (tree->gtOper)
{
case GT_RETFILT:
case GT_RETURN:
case GT_IND:
return tree;
default:
break;
}
/* try to fold the current node */
if ((kind & GTK_UNOP) && op1)
{
if (op1->OperIsConst())
{
return gtFoldExprConst(tree);
}
}
else if ((kind & GTK_BINOP) && op1 && tree->AsOp()->gtOp2 &&
// Don't take out conditionals for debugging
(opts.OptimizationEnabled() || !tree->OperIsCompare()))
{
GenTree* op2 = tree->AsOp()->gtOp2;
// The atomic operations are exempted here because they are never computable statically;
// one of their arguments is an address.
if (op1->OperIsConst() && op2->OperIsConst() && !tree->OperIsAtomicOp())
{
/* both nodes are constants - fold the expression */
return gtFoldExprConst(tree);
}
else if (op1->OperIsConst() || op2->OperIsConst())
{
/* at least one is a constant - see if we have a
* special operator that can use only one constant
* to fold - e.g. booleans */
return gtFoldExprSpecial(tree);
}
else if (tree->OperIsCompare())
{
/* comparisons of two local variables can sometimes be folded */
return gtFoldExprCompare(tree);
}
}
/* Return the original node (folded/bashed or not) */
return tree;
}
//------------------------------------------------------------------------
// gtFoldExprCall: see if a call is foldable
//
// Arguments:
// call - call to examine
//
// Returns:
// The original call if no folding happened.
// An alternative tree if folding happens.
//
// Notes:
// Checks for calls to Type.op_Equality, Type.op_Inequality, and
// Enum.HasFlag, and if the call is to one of these,
// attempts to optimize.
GenTree* Compiler::gtFoldExprCall(GenTreeCall* call)
{
// Can only fold calls to special intrinsics.
if ((call->gtCallMoreFlags & GTF_CALL_M_SPECIAL_INTRINSIC) == 0)
{
return call;
}
// Defer folding if not optimizing.
if (opts.OptimizationDisabled())
{
return call;
}
// Check for a new-style jit intrinsic.
const NamedIntrinsic ni = lookupNamedIntrinsic(call->gtCallMethHnd);
switch (ni)
{
case NI_System_Enum_HasFlag:
{
GenTree* thisOp = call->gtCallThisArg->GetNode();
GenTree* flagOp = call->gtCallArgs->GetNode();
GenTree* result = gtOptimizeEnumHasFlag(thisOp, flagOp);
if (result != nullptr)
{
return result;
}
break;
}
case NI_System_Type_op_Equality:
case NI_System_Type_op_Inequality:
{
noway_assert(call->TypeGet() == TYP_INT);
GenTree* op1 = call->gtCallArgs->GetNode();
GenTree* op2 = call->gtCallArgs->GetNext()->GetNode();
// If either operand is known to be a RuntimeType, this can be folded
GenTree* result = gtFoldTypeEqualityCall(ni == NI_System_Type_op_Equality, op1, op2);
if (result != nullptr)
{
return result;
}
break;
}
default:
break;
}
return call;
}
//------------------------------------------------------------------------
// gtFoldTypeEqualityCall: see if a (potential) type equality call is foldable
//
// Arguments:
// isEq -- is it == or != operator
// op1 -- first argument to call
// op2 -- second argument to call
//
// Returns:
// nullptr if no folding happened.
// An alternative tree if folding happens.
//
// Notes:
// If either operand is known to be a RuntimeType, then the type
// equality methods will simply check object identity and so we can
// fold the call into a simple compare of the call's operands.
GenTree* Compiler::gtFoldTypeEqualityCall(bool isEq, GenTree* op1, GenTree* op2)
{
if ((gtGetTypeProducerKind(op1) == TPK_Unknown) && (gtGetTypeProducerKind(op2) == TPK_Unknown))
{
return nullptr;
}
const genTreeOps simpleOp = isEq ? GT_EQ : GT_NE;
JITDUMP("\nFolding call to Type:op_%s to a simple compare via %s\n", isEq ? "Equality" : "Inequality",
GenTree::OpName(simpleOp));
GenTree* compare = gtNewOperNode(simpleOp, TYP_INT, op1, op2);
return compare;
}
/*****************************************************************************
*
* Some comparisons can be folded:
*
* locA == locA
* classVarA == classVarA
* locA + locB == locB + locA
*
*/
GenTree* Compiler::gtFoldExprCompare(GenTree* tree)
{
GenTree* op1 = tree->AsOp()->gtOp1;
GenTree* op2 = tree->AsOp()->gtOp2;
assert(tree->OperIsCompare());
/* Filter out cases that cannot be folded here */
/* Do not fold floats or doubles (e.g. NaN != NaN) */
if (varTypeIsFloating(op1->TypeGet()))
{
return tree;
}
// Currently we can only fold when the two subtrees exactly match
// and everything is side effect free.
//
if (((tree->gtFlags & GTF_SIDE_EFFECT) != 0) || !GenTree::Compare(op1, op2, true))
{
// No folding.
//
return tree;
}
// GTF_ORDER_SIDEEFF here may indicate volatile subtrees.
// Or it may indicate a non-null assertion prop into an indir subtree.
//
// Check the operands.
//
if ((tree->gtFlags & GTF_ORDER_SIDEEFF) != 0)
{
// If op1 is "volatile" and op2 is not, we can still fold.
//
const bool op1MayBeVolatile = (op1->gtFlags & GTF_ORDER_SIDEEFF) != 0;
const bool op2MayBeVolatile = (op2->gtFlags & GTF_ORDER_SIDEEFF) != 0;
if (!op1MayBeVolatile || op2MayBeVolatile)
{
// No folding.
//
return tree;
}
}
GenTree* cons;
switch (tree->gtOper)
{
case GT_EQ:
case GT_LE:
case GT_GE:
cons = gtNewIconNode(true); /* Folds to GT_CNS_INT(true) */
break;
case GT_NE:
case GT_LT:
case GT_GT:
cons = gtNewIconNode(false); /* Folds to GT_CNS_INT(false) */
break;
default:
assert(!"Unexpected relOp");
return tree;
}
/* The node has been folded into 'cons' */
JITDUMP("\nFolding comparison with identical operands:\n");
DISPTREE(tree);
if (fgGlobalMorph)
{
fgMorphTreeDone(cons);
}
else
{
cons->gtNext = tree->gtNext;
cons->gtPrev = tree->gtPrev;
}
JITDUMP("Bashed to %s:\n", cons->AsIntConCommon()->IconValue() ? "true" : "false");
DISPTREE(cons);
return cons;
}
//------------------------------------------------------------------------
// gtCreateHandleCompare: generate a type handle comparison
//
// Arguments:
// oper -- comparison operation (equal/not equal)
// op1 -- first operand
// op2 -- second operand
// typeCheckInliningResult -- indicates how the comparison should happen
//
// Returns:
// Type comparison tree
//
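// Illustrative shapes produced (sketch only):
//
//   direct compare:  GT_EQ(op1, op2)                                              // or GT_NE
//   helper compare:  GT_NE(CALL CORINFO_HELP_ARE_TYPES_EQUIVALENT(op1, op2), 0)   // for GT_EQ
//                    GT_EQ(CALL CORINFO_HELP_ARE_TYPES_EQUIVALENT(op1, op2), 0)   // for GT_NE
//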
GenTree* Compiler::gtCreateHandleCompare(genTreeOps oper,
GenTree* op1,
GenTree* op2,
CorInfoInlineTypeCheck typeCheckInliningResult)
{
// If we can compare pointers directly, just emit the binary operation
if (typeCheckInliningResult == CORINFO_INLINE_TYPECHECK_PASS)
{
return gtNewOperNode(oper, TYP_INT, op1, op2);
}
assert(typeCheckInliningResult == CORINFO_INLINE_TYPECHECK_USE_HELPER);
// Emit a call to a runtime helper
GenTreeCall::Use* helperArgs = gtNewCallArgs(op1, op2);
GenTree* ret = gtNewHelperCallNode(CORINFO_HELP_ARE_TYPES_EQUIVALENT, TYP_INT, helperArgs);
if (oper == GT_EQ)
{
ret = gtNewOperNode(GT_NE, TYP_INT, ret, gtNewIconNode(0, TYP_INT));
}
else
{
assert(oper == GT_NE);
ret = gtNewOperNode(GT_EQ, TYP_INT, ret, gtNewIconNode(0, TYP_INT));
}
return ret;
}
//------------------------------------------------------------------------
// gtFoldTypeCompare: see if a type comparison can be further simplified
//
// Arguments:
// tree -- tree possibly comparing types
//
// Returns:
// An alternative tree if folding happens.
// Original tree otherwise.
//
// Notes:
// Checks for
// typeof(...) == obj.GetType()
// typeof(...) == typeof(...)
// obj1.GetType() == obj2.GetType()
//
// And potentially optimizes away the need to obtain actual
// RuntimeType objects to do the comparison.
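//
// Illustrative sketch (C# shown only to indicate the recognized shapes; names are made up):
//
//   typeof(Foo) == typeof(Bar)     // may fold to a jit-time constant or a handle compare
//   obj.GetType() == typeof(Foo)   // may become a method table compare against Foo's handle
//   a.GetType() == b.GetType()     // may become a compare of the two method tables
//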
GenTree* Compiler::gtFoldTypeCompare(GenTree* tree)
{
// Only handle EQ and NE
// (maybe relop vs null someday)
const genTreeOps oper = tree->OperGet();
if ((oper != GT_EQ) && (oper != GT_NE))
{
return tree;
}
// Screen for the right kinds of operands
GenTree* const op1 = tree->AsOp()->gtOp1;
const TypeProducerKind op1Kind = gtGetTypeProducerKind(op1);
if (op1Kind == TPK_Unknown)
{
return tree;
}
GenTree* const op2 = tree->AsOp()->gtOp2;
const TypeProducerKind op2Kind = gtGetTypeProducerKind(op2);
if (op2Kind == TPK_Unknown)
{
return tree;
}
// If both types are created via handles, we can simply compare
// handles instead of the types that they'd create.
if ((op1Kind == TPK_Handle) && (op2Kind == TPK_Handle))
{
JITDUMP("Optimizing compare of types-from-handles to instead compare handles\n");
GenTree* op1ClassFromHandle = tree->AsOp()->gtOp1->AsCall()->gtCallArgs->GetNode();
GenTree* op2ClassFromHandle = tree->AsOp()->gtOp2->AsCall()->gtCallArgs->GetNode();
CORINFO_CLASS_HANDLE cls1Hnd = NO_CLASS_HANDLE;
CORINFO_CLASS_HANDLE cls2Hnd = NO_CLASS_HANDLE;
// Try and find class handles from op1 and op2
cls1Hnd = gtGetHelperArgClassHandle(op1ClassFromHandle);
cls2Hnd = gtGetHelperArgClassHandle(op2ClassFromHandle);
// If we have both class handles, try and resolve the type equality test completely.
bool resolveFailed = false;
if ((cls1Hnd != NO_CLASS_HANDLE) && (cls2Hnd != NO_CLASS_HANDLE))
{
JITDUMP("Asking runtime to compare %p (%s) and %p (%s) for equality\n", dspPtr(cls1Hnd),
info.compCompHnd->getClassName(cls1Hnd), dspPtr(cls2Hnd), info.compCompHnd->getClassName(cls2Hnd));
TypeCompareState s = info.compCompHnd->compareTypesForEquality(cls1Hnd, cls2Hnd);
if (s != TypeCompareState::May)
{
// Type comparison result is known.
const bool typesAreEqual = (s == TypeCompareState::Must);
const bool operatorIsEQ = (oper == GT_EQ);
const int compareResult = operatorIsEQ ^ typesAreEqual ? 0 : 1;
JITDUMP("Runtime reports comparison is known at jit time: %u\n", compareResult);
GenTree* result = gtNewIconNode(compareResult);
return result;
}
else
{
resolveFailed = true;
}
}
if (resolveFailed)
{
JITDUMP("Runtime reports comparison is NOT known at jit time\n");
}
else
{
JITDUMP("Could not find handle for %s%s\n", (cls1Hnd == NO_CLASS_HANDLE) ? " cls1" : "",
(cls2Hnd == NO_CLASS_HANDLE) ? " cls2" : "");
}
// We can't answer the equality comparison definitively at jit
// time, but can still simplify the comparison.
//
// Find out how we can compare the two handles.
// NOTE: We're potentially passing NO_CLASS_HANDLE, but the runtime knows what to do with it here.
CorInfoInlineTypeCheck inliningKind =
info.compCompHnd->canInlineTypeCheck(cls1Hnd, CORINFO_INLINE_TYPECHECK_SOURCE_TOKEN);
// If the first type needs helper, check the other type: it might be okay with a simple compare.
if (inliningKind == CORINFO_INLINE_TYPECHECK_USE_HELPER)
{
inliningKind = info.compCompHnd->canInlineTypeCheck(cls2Hnd, CORINFO_INLINE_TYPECHECK_SOURCE_TOKEN);
}
assert(inliningKind == CORINFO_INLINE_TYPECHECK_PASS || inliningKind == CORINFO_INLINE_TYPECHECK_USE_HELPER);
GenTree* compare = gtCreateHandleCompare(oper, op1ClassFromHandle, op2ClassFromHandle, inliningKind);
// Drop any now-irrelevant flags
compare->gtFlags |= tree->gtFlags & (GTF_RELOP_JMP_USED | GTF_DONT_CSE);
return compare;
}
if ((op1Kind == TPK_GetType) && (op2Kind == TPK_GetType))
{
GenTree* arg1;
if (op1->OperGet() == GT_INTRINSIC)
{
arg1 = op1->AsUnOp()->gtOp1;
}
else
{
arg1 = op1->AsCall()->gtCallThisArg->GetNode();
}
arg1 = gtNewMethodTableLookup(arg1);
GenTree* arg2;
if (op2->OperGet() == GT_INTRINSIC)
{
arg2 = op2->AsUnOp()->gtOp1;
}
else
{
arg2 = op2->AsCall()->gtCallThisArg->GetNode();
}
arg2 = gtNewMethodTableLookup(arg2);
CorInfoInlineTypeCheck inliningKind =
info.compCompHnd->canInlineTypeCheck(nullptr, CORINFO_INLINE_TYPECHECK_SOURCE_VTABLE);
assert(inliningKind == CORINFO_INLINE_TYPECHECK_PASS || inliningKind == CORINFO_INLINE_TYPECHECK_USE_HELPER);
GenTree* compare = gtCreateHandleCompare(oper, arg1, arg2, inliningKind);
// Drop any now-irrelevant flags
compare->gtFlags |= tree->gtFlags & (GTF_RELOP_JMP_USED | GTF_DONT_CSE);
return compare;
}
// If one operand creates a type from a handle and the other operand is fetching the type from an object,
// we can sometimes optimize the type compare into a simpler
// method table comparison.
//
// TODO: if other operand is null...
if (!(((op1Kind == TPK_GetType) && (op2Kind == TPK_Handle)) ||
((op1Kind == TPK_Handle) && (op2Kind == TPK_GetType))))
{
return tree;
}
GenTree* const opHandle = (op1Kind == TPK_Handle) ? op1 : op2;
GenTree* const opOther = (op1Kind == TPK_Handle) ? op2 : op1;
// Tunnel through the handle operand to get at the class handle involved.
GenTree* const opHandleArgument = opHandle->AsCall()->gtCallArgs->GetNode();
CORINFO_CLASS_HANDLE clsHnd = gtGetHelperArgClassHandle(opHandleArgument);
// If we couldn't find the class handle, give up.
if (clsHnd == NO_CLASS_HANDLE)
{
return tree;
}
// Ask the VM if this type can be equality tested by a simple method
// table comparison.
CorInfoInlineTypeCheck typeCheckInliningResult =
info.compCompHnd->canInlineTypeCheck(clsHnd, CORINFO_INLINE_TYPECHECK_SOURCE_VTABLE);
if (typeCheckInliningResult == CORINFO_INLINE_TYPECHECK_NONE)
{
return tree;
}
// We're good to go.
JITDUMP("Optimizing compare of obj.GetType()"
" and type-from-handle to compare method table pointer\n");
// opHandleArgument is the method table we're looking for.
GenTree* const knownMT = opHandleArgument;
// Fetch object method table from the object itself.
GenTree* objOp = nullptr;
// Note we may see intrinsified or regular calls to GetType
if (opOther->OperGet() == GT_INTRINSIC)
{
objOp = opOther->AsUnOp()->gtOp1;
}
else
{
objOp = opOther->AsCall()->gtCallThisArg->GetNode();
}
bool pIsExact = false;
bool pIsNonNull = false;
CORINFO_CLASS_HANDLE objCls = gtGetClassHandle(objOp, &pIsExact, &pIsNonNull);
// if both classes are "final" (e.g. System.String[]) we can replace the comparison
// with `true/false` + null check.
if ((objCls != NO_CLASS_HANDLE) && (pIsExact || impIsClassExact(objCls)))
{
TypeCompareState tcs = info.compCompHnd->compareTypesForEquality(objCls, clsHnd);
if (tcs != TypeCompareState::May)
{
const bool operatorIsEQ = oper == GT_EQ;
const bool typesAreEqual = tcs == TypeCompareState::Must;
GenTree* compareResult = gtNewIconNode((operatorIsEQ ^ typesAreEqual) ? 0 : 1);
if (!pIsNonNull)
{
// we still have to emit a null-check
// obj.GetType == typeof() -> (nullcheck) true/false
GenTree* nullcheck = gtNewNullCheck(objOp, compCurBB);
return gtNewOperNode(GT_COMMA, tree->TypeGet(), nullcheck, compareResult);
}
else if (objOp->gtFlags & GTF_ALL_EFFECT)
{
return gtNewOperNode(GT_COMMA, tree->TypeGet(), objOp, compareResult);
}
else
{
return compareResult;
}
}
}
// Fetch the method table from the object
GenTree* const objMT = gtNewMethodTableLookup(objOp);
// Compare the two method tables
GenTree* const compare = gtCreateHandleCompare(oper, objMT, knownMT, typeCheckInliningResult);
// Drop any now irrelevant flags
compare->gtFlags |= tree->gtFlags & (GTF_RELOP_JMP_USED | GTF_DONT_CSE);
// And we're done
return compare;
}
//------------------------------------------------------------------------
// gtGetHelperArgClassHandle: find the compile time class handle from
// a helper call argument tree
//
// Arguments:
// tree - tree that passes the handle to the helper
//
// Returns:
// The compile time class handle if known.
//
CORINFO_CLASS_HANDLE Compiler::gtGetHelperArgClassHandle(GenTree* tree)
{
CORINFO_CLASS_HANDLE result = NO_CLASS_HANDLE;
// Walk through any wrapping nop.
if ((tree->gtOper == GT_NOP) && (tree->gtType == TYP_I_IMPL))
{
tree = tree->AsOp()->gtOp1;
}
// The handle could be a literal constant
if ((tree->OperGet() == GT_CNS_INT) && (tree->TypeGet() == TYP_I_IMPL))
{
assert(tree->IsIconHandle(GTF_ICON_CLASS_HDL));
result = (CORINFO_CLASS_HANDLE)tree->AsIntCon()->gtCompileTimeHandle;
}
// Or the result of a runtime lookup
else if (tree->OperGet() == GT_RUNTIMELOOKUP)
{
result = tree->AsRuntimeLookup()->GetClassHandle();
}
// Or something reached indirectly
else if (tree->gtOper == GT_IND)
{
// The handle indirs we are looking for will be marked as non-faulting.
// Certain others (eg from refanytype) may not be.
if (tree->gtFlags & GTF_IND_NONFAULTING)
{
GenTree* handleTreeInternal = tree->AsOp()->gtOp1;
if ((handleTreeInternal->OperGet() == GT_CNS_INT) && (handleTreeInternal->TypeGet() == TYP_I_IMPL))
{
// These handle constants should be class handles.
assert(handleTreeInternal->IsIconHandle(GTF_ICON_CLASS_HDL));
result = (CORINFO_CLASS_HANDLE)handleTreeInternal->AsIntCon()->gtCompileTimeHandle;
}
}
}
return result;
}
//------------------------------------------------------------------------
// gtFoldExprSpecial -- optimize binary ops with one constant operand
//
// Arguments:
// tree - tree to optimize
//
// Return value:
// Tree (possibly modified at root or below), or a new tree
// Any new tree is fully morphed, if necessary.
//
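// Illustrative folds performed below (sketch; 'op' is the non-constant operand
// and the side-effect restrictions documented in the code apply):
//
//   op + 0  ==>  op        op * 0  ==>  0
//   op * 1  ==>  op        op & 0  ==>  0
//   op | 0  ==>  op        x <u 0  ==>  0   (unsigned compare against zero)
//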
GenTree* Compiler::gtFoldExprSpecial(GenTree* tree)
{
GenTree* op1 = tree->AsOp()->gtOp1;
GenTree* op2 = tree->AsOp()->gtOp2;
genTreeOps oper = tree->OperGet();
GenTree* op;
GenTree* cons;
ssize_t val;
assert(tree->OperKind() & GTK_BINOP);
/* Filter out operators that cannot be folded here */
if (oper == GT_CAST)
{
return tree;
}
/* We only consider TYP_INT for folding
* Do not fold pointer arithmetic (e.g. addressing modes!) */
if (oper != GT_QMARK && !varTypeIsIntOrI(tree->gtType))
{
return tree;
}
/* Find out which is the constant node */
if (op1->IsCnsIntOrI())
{
op = op2;
cons = op1;
}
else if (op2->IsCnsIntOrI())
{
op = op1;
cons = op2;
}
else
{
return tree;
}
/* Get the constant value */
val = cons->AsIntConCommon()->IconValue();
// Transforms that would drop op cannot be performed if op has side effects
bool opHasSideEffects = (op->gtFlags & GTF_SIDE_EFFECT) != 0;
// Helper function that creates a new IntCon node and morphs it, if required
auto NewMorphedIntConNode = [&](int value) -> GenTreeIntCon* {
GenTreeIntCon* icon = gtNewIconNode(value);
if (fgGlobalMorph)
{
fgMorphTreeDone(icon);
}
return icon;
};
// Here `op` is the non-constant operand, `cons` is the constant operand
// and `val` is the constant value.
switch (oper)
{
case GT_LE:
if (tree->IsUnsigned() && (val == 0) && (op1 == cons) && !opHasSideEffects)
{
// unsigned (0 <= x) is always true
op = NewMorphedIntConNode(1);
goto DONE_FOLD;
}
break;
case GT_GE:
if (tree->IsUnsigned() && (val == 0) && (op2 == cons) && !opHasSideEffects)
{
// unsigned (x >= 0) is always true
op = NewMorphedIntConNode(1);
goto DONE_FOLD;
}
break;
case GT_LT:
if (tree->IsUnsigned() && (val == 0) && (op2 == cons) && !opHasSideEffects)
{
// unsigned (x < 0) is always false
op = NewMorphedIntConNode(0);
goto DONE_FOLD;
}
break;
case GT_GT:
if (tree->IsUnsigned() && (val == 0) && (op1 == cons) && !opHasSideEffects)
{
// unsigned (0 > x) is always false
op = NewMorphedIntConNode(0);
goto DONE_FOLD;
}
FALLTHROUGH;
case GT_EQ:
case GT_NE:
// Optimize boxed value classes; these are always false. This IL is
// generated when a generic value is tested against null:
// <T> ... foo(T x) { ... if ((object)x == null) ...
if ((val == 0) && op->IsBoxedValue())
{
JITDUMP("\nAttempting to optimize BOX(valueType) %s null [%06u]\n", GenTree::OpName(oper),
dspTreeID(tree));
// We don't expect GT_GT with signed compares, and we
// can't predict the result if we do see it, since the
// boxed object addr could have its high bit set.
if ((oper == GT_GT) && !tree->IsUnsigned())
{
JITDUMP(" bailing; unexpected signed compare via GT_GT\n");
}
else
{
// The tree under the box must be side effect free
// since we will drop it if we optimize.
assert(!gtTreeHasSideEffects(op->AsBox()->BoxOp(), GTF_SIDE_EFFECT));
// See if we can optimize away the box and related statements.
GenTree* boxSourceTree = gtTryRemoveBoxUpstreamEffects(op);
bool didOptimize = (boxSourceTree != nullptr);
// If optimization succeeded, remove the box.
if (didOptimize)
{
// Set up the result of the compare.
int compareResult = 0;
if (oper == GT_GT)
{
// GT_GT(null, box) == false
// GT_GT(box, null) == true
compareResult = (op1 == op);
}
else if (oper == GT_EQ)
{
// GT_EQ(box, null) == false
// GT_EQ(null, box) == false
compareResult = 0;
}
else
{
assert(oper == GT_NE);
// GT_NE(box, null) == true
// GT_NE(null, box) == true
compareResult = 1;
}
JITDUMP("\nSuccess: replacing BOX(valueType) %s null with %d\n", GenTree::OpName(oper),
compareResult);
return NewMorphedIntConNode(compareResult);
}
}
}
else
{
return gtFoldBoxNullable(tree);
}
break;
case GT_ADD:
if (val == 0)
{
goto DONE_FOLD;
}
break;
case GT_MUL:
if (val == 1)
{
goto DONE_FOLD;
}
else if (val == 0)
{
/* Multiply by zero - return the 'zero' node, but not if side effects */
if (!opHasSideEffects)
{
op = cons;
goto DONE_FOLD;
}
}
break;
case GT_DIV:
case GT_UDIV:
if ((op2 == cons) && (val == 1) && !op1->OperIsConst())
{
goto DONE_FOLD;
}
break;
case GT_SUB:
if ((op2 == cons) && (val == 0) && !op1->OperIsConst())
{
goto DONE_FOLD;
}
break;
case GT_AND:
if (val == 0)
{
/* AND with zero - return the 'zero' node, but not if side effects */
if (!opHasSideEffects)
{
op = cons;
goto DONE_FOLD;
}
}
else
{
/* The GTF_BOOLEAN flag is set for nodes that are part
* of a boolean expression, thus all their children
* are known to evaluate to only 0 or 1 */
if (tree->gtFlags & GTF_BOOLEAN)
{
/* The constant value must be 1
* AND with 1 stays the same */
assert(val == 1);
goto DONE_FOLD;
}
}
break;
case GT_OR:
if (val == 0)
{
goto DONE_FOLD;
}
else if (tree->gtFlags & GTF_BOOLEAN)
{
/* The constant value must be 1 - OR with 1 is 1 */
assert(val == 1);
/* OR with one - return the 'one' node, but not if side effects */
if (!opHasSideEffects)
{
op = cons;
goto DONE_FOLD;
}
}
break;
case GT_LSH:
case GT_RSH:
case GT_RSZ:
case GT_ROL:
case GT_ROR:
if (val == 0)
{
if (op2 == cons)
{
goto DONE_FOLD;
}
else if (!opHasSideEffects)
{
op = cons;
goto DONE_FOLD;
}
}
break;
case GT_QMARK:
{
assert(op1 == cons && op2 == op && op2->gtOper == GT_COLON);
assert(op2->AsOp()->gtOp1 && op2->AsOp()->gtOp2);
assert(val == 0 || val == 1);
if (val)
{
op = op2->AsColon()->ThenNode();
}
else
{
op = op2->AsColon()->ElseNode();
}
// Clear colon flags only if the qmark itself is not conditionally executed
if ((tree->gtFlags & GTF_COLON_COND) == 0)
{
fgWalkTreePre(&op, gtClearColonCond);
}
}
goto DONE_FOLD;
default:
break;
}
/* The node is not foldable */
return tree;
DONE_FOLD:
/* The node has been folded into 'op' */
// If there was an assignment update, we just morphed it into
// a use; update the flags appropriately
if (op->gtOper == GT_LCL_VAR)
{
assert(tree->OperIs(GT_ASG) || (op->gtFlags & (GTF_VAR_USEASG | GTF_VAR_DEF)) == 0);
op->gtFlags &= ~(GTF_VAR_USEASG | GTF_VAR_DEF);
}
JITDUMP("\nFolding binary operator with a constant operand:\n");
DISPTREE(tree);
JITDUMP("Transformed into:\n");
DISPTREE(op);
return op;
}
//------------------------------------------------------------------------
// gtFoldBoxNullable -- optimize a boxed nullable feeding a compare to zero
//
// Arguments:
// tree - binop tree to potentially optimize, must be
// GT_GT, GT_EQ, or GT_NE
//
// Return value:
// Tree (possibly modified below the root).
//
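// Illustrative sketch (assumed C# shape):
//
//   int? x = ...;
//   if ((object)x == null) ...      // BOX_NULLABLE(&x) compared against null
//
// When the pattern is recognized, the box helper call is replaced by a read of
// the nullable's 'hasValue' field, so the compare becomes x.hasValue == 0.
//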
GenTree* Compiler::gtFoldBoxNullable(GenTree* tree)
{
assert(tree->OperKind() & GTK_BINOP);
assert(tree->OperIs(GT_GT, GT_EQ, GT_NE));
genTreeOps const oper = tree->OperGet();
if ((oper == GT_GT) && !tree->IsUnsigned())
{
return tree;
}
GenTree* const op1 = tree->AsOp()->gtOp1;
GenTree* const op2 = tree->AsOp()->gtOp2;
GenTree* op;
GenTree* cons;
if (op1->IsCnsIntOrI())
{
op = op2;
cons = op1;
}
else if (op2->IsCnsIntOrI())
{
op = op1;
cons = op2;
}
else
{
return tree;
}
ssize_t const val = cons->AsIntConCommon()->IconValue();
if (val != 0)
{
return tree;
}
if (!op->IsCall())
{
return tree;
}
GenTreeCall* const call = op->AsCall();
if (!call->IsHelperCall(this, CORINFO_HELP_BOX_NULLABLE))
{
return tree;
}
JITDUMP("\nAttempting to optimize BOX_NULLABLE(&x) %s null [%06u]\n", GenTree::OpName(oper), dspTreeID(tree));
// Get the address of the struct being boxed
GenTree* const arg = call->gtCallArgs->GetNext()->GetNode();
if (arg->OperIs(GT_ADDR) && ((arg->gtFlags & GTF_LATE_ARG) == 0))
{
CORINFO_CLASS_HANDLE nullableHnd = gtGetStructHandle(arg->AsOp()->gtOp1);
CORINFO_FIELD_HANDLE fieldHnd = info.compCompHnd->getFieldInClass(nullableHnd, 0);
// Replace the box with an access of the nullable 'hasValue' field.
JITDUMP("\nSuccess: replacing BOX_NULLABLE(&x) [%06u] with x.hasValue\n", dspTreeID(op));
GenTree* newOp = gtNewFieldRef(TYP_BOOL, fieldHnd, arg, 0);
if (op == op1)
{
tree->AsOp()->gtOp1 = newOp;
}
else
{
tree->AsOp()->gtOp2 = newOp;
}
cons->gtType = TYP_INT;
}
return tree;
}
//------------------------------------------------------------------------
// gtTryRemoveBoxUpstreamEffects: given an unused value type box,
// try and remove the upstream allocation and unnecessary parts of
// the copy.
//
// Arguments:
// op - the box node to optimize
// options - controls whether and how trees are modified
// (see notes)
//
// Return Value:
// A tree representing the original value to box, if removal
// is successful/possible (but see note). nullptr if removal fails.
//
// Notes:
// Value typed box gets special treatment because it has associated
// side effects that can be removed if the box result is not used.
//
// By default (options == BR_REMOVE_AND_NARROW) this method will
// try and remove unnecessary trees and will try and reduce remaining
// operations to the minimal set, possibly narrowing the width of
// loads from the box source if it is a struct.
//
// To perform a trial removal, pass BR_DONT_REMOVE. This can be
// useful to determine if this optimization should only be
// performed if some other conditions hold true.
//
// To remove but not alter the access to the box source, pass
// BR_REMOVE_BUT_NOT_NARROW.
//
// To remove and return the tree for the type handle used for
// the boxed newobj, pass BR_REMOVE_BUT_NOT_NARROW_WANT_TYPE_HANDLE.
// This can be useful when the only part of the box that is "live"
// is its type.
//
// If removal fails, it is possible that a subsequent pass may be
// able to optimize. Blocking side effects may now be minimized
// (null or bounds checks might have been removed) or might be
// better known (inline return placeholder updated with the actual
// return expression). So the box is perhaps best left as is to
// help trigger this re-examination.
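//
// Illustrative upstream shape this method looks for (sketch only):
//
//   asgStmt:  ASG(boxTemp, ALLOCOBJ(clsHnd))              // or a newobj helper call
//   copyStmt: ASG(IND(ADD(boxTemp, TARGET_POINTER_SIZE)), valueToBox)
//
// On success the allocation is bashed to a NOP and the copy is reduced to just
// the side effects of 'valueToBox' that must be preserved.
//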
GenTree* Compiler::gtTryRemoveBoxUpstreamEffects(GenTree* op, BoxRemovalOptions options)
{
assert(op->IsBoxedValue());
// grab related parts for the optimization
GenTreeBox* box = op->AsBox();
Statement* asgStmt = box->gtAsgStmtWhenInlinedBoxValue;
Statement* copyStmt = box->gtCopyStmtWhenInlinedBoxValue;
JITDUMP("gtTryRemoveBoxUpstreamEffects: %s to %s of BOX (valuetype)"
" [%06u] (assign/newobj " FMT_STMT " copy " FMT_STMT "\n",
(options == BR_DONT_REMOVE) ? "checking if it is possible" : "attempting",
(options == BR_MAKE_LOCAL_COPY) ? "make local unboxed version" : "remove side effects", dspTreeID(op),
asgStmt->GetID(), copyStmt->GetID());
// If we don't recognize the form of the assign, bail.
GenTree* asg = asgStmt->GetRootNode();
if (asg->gtOper != GT_ASG)
{
JITDUMP(" bailing; unexpected assignment op %s\n", GenTree::OpName(asg->gtOper));
return nullptr;
}
// If we're eventually going to return the type handle, remember it now.
GenTree* boxTypeHandle = nullptr;
if ((options == BR_REMOVE_AND_NARROW_WANT_TYPE_HANDLE) || (options == BR_DONT_REMOVE_WANT_TYPE_HANDLE))
{
GenTree* asgSrc = asg->AsOp()->gtOp2;
genTreeOps asgSrcOper = asgSrc->OperGet();
// Allocation may be via AllocObj or via helper call, depending
// on when this is invoked and whether the jit is using AllocObj
// for R2R allocations.
if (asgSrcOper == GT_ALLOCOBJ)
{
GenTreeAllocObj* allocObj = asgSrc->AsAllocObj();
boxTypeHandle = allocObj->AsOp()->gtOp1;
}
else if (asgSrcOper == GT_CALL)
{
GenTreeCall* newobjCall = asgSrc->AsCall();
GenTreeCall::Use* newobjArgs = newobjCall->gtCallArgs;
// In R2R expansions the handle may not be an explicit operand to the helper,
// so we can't remove the box.
if (newobjArgs == nullptr)
{
assert(newobjCall->IsHelperCall(this, CORINFO_HELP_READYTORUN_NEW));
JITDUMP(" bailing; newobj via R2R helper\n");
return nullptr;
}
boxTypeHandle = newobjArgs->GetNode();
}
else
{
unreached();
}
assert(boxTypeHandle != nullptr);
}
// If we don't recognize the form of the copy, bail.
GenTree* copy = copyStmt->GetRootNode();
if (copy->gtOper != GT_ASG)
{
// GT_RET_EXPR is a tolerable temporary failure.
// The jit will revisit this optimization after
// inlining is done.
if (copy->gtOper == GT_RET_EXPR)
{
JITDUMP(" bailing; must wait for replacement of copy %s\n", GenTree::OpName(copy->gtOper));
}
else
{
// Anything else is a missed case we should
// figure out how to handle. One known case
// is GT_COMMAs enclosing the GT_ASG we are
// looking for.
JITDUMP(" bailing; unexpected copy op %s\n", GenTree::OpName(copy->gtOper));
}
return nullptr;
}
// Handle case where we are optimizing the box into a local copy
if (options == BR_MAKE_LOCAL_COPY)
{
// Drill into the box to get at the box temp local and the box type
GenTree* boxTemp = box->BoxOp();
assert(boxTemp->IsLocal());
const unsigned boxTempLcl = boxTemp->AsLclVar()->GetLclNum();
assert(lvaTable[boxTempLcl].lvType == TYP_REF);
CORINFO_CLASS_HANDLE boxClass = lvaTable[boxTempLcl].lvClassHnd;
assert(boxClass != nullptr);
// Verify that the copyDst has the expected shape
// (blk|obj|ind (add (boxTempLcl, ptr-size)))
//
// The shape here is constrained to the patterns we produce
// over in impImportAndPushBox for the inlined box case.
GenTree* copyDst = copy->AsOp()->gtOp1;
if (!copyDst->OperIs(GT_BLK, GT_IND, GT_OBJ))
{
JITDUMP("Unexpected copy dest operator %s\n", GenTree::OpName(copyDst->gtOper));
return nullptr;
}
GenTree* copyDstAddr = copyDst->AsOp()->gtOp1;
if (copyDstAddr->OperGet() != GT_ADD)
{
JITDUMP("Unexpected copy dest address tree\n");
return nullptr;
}
GenTree* copyDstAddrOp1 = copyDstAddr->AsOp()->gtOp1;
if ((copyDstAddrOp1->OperGet() != GT_LCL_VAR) || (copyDstAddrOp1->AsLclVarCommon()->GetLclNum() != boxTempLcl))
{
JITDUMP("Unexpected copy dest address 1st addend\n");
return nullptr;
}
GenTree* copyDstAddrOp2 = copyDstAddr->AsOp()->gtOp2;
if (!copyDstAddrOp2->IsIntegralConst(TARGET_POINTER_SIZE))
{
JITDUMP("Unexpected copy dest address 2nd addend\n");
return nullptr;
}
// Screening checks have all passed. Do the transformation.
//
// Retype the box temp to be a struct
JITDUMP("Retyping box temp V%02u to struct %s\n", boxTempLcl, eeGetClassName(boxClass));
lvaTable[boxTempLcl].lvType = TYP_UNDEF;
const bool isUnsafeValueClass = false;
lvaSetStruct(boxTempLcl, boxClass, isUnsafeValueClass);
var_types boxTempType = lvaTable[boxTempLcl].lvType;
// Remove the newobj and assignment to box temp
JITDUMP("Bashing NEWOBJ [%06u] to NOP\n", dspTreeID(asg));
asg->gtBashToNOP();
// Update the copy from the value to be boxed to the box temp
GenTree* newDst = gtNewOperNode(GT_ADDR, TYP_BYREF, gtNewLclvNode(boxTempLcl, boxTempType));
copyDst->AsOp()->gtOp1 = newDst;
// Return the address of the now-struct typed box temp
GenTree* retValue = gtNewOperNode(GT_ADDR, TYP_BYREF, gtNewLclvNode(boxTempLcl, boxTempType));
return retValue;
}
// If the copy is a struct copy, make sure we know how to isolate
// any source side effects.
GenTree* copySrc = copy->AsOp()->gtOp2;
// If the copy source is from a pending inline, wait for it to resolve.
if (copySrc->gtOper == GT_RET_EXPR)
{
JITDUMP(" bailing; must wait for replacement of copy source %s\n", GenTree::OpName(copySrc->gtOper));
return nullptr;
}
bool hasSrcSideEffect = false;
bool isStructCopy = false;
if (gtTreeHasSideEffects(copySrc, GTF_SIDE_EFFECT))
{
hasSrcSideEffect = true;
if (varTypeIsStruct(copySrc->gtType))
{
isStructCopy = true;
if ((copySrc->gtOper != GT_OBJ) && (copySrc->gtOper != GT_IND) && (copySrc->gtOper != GT_FIELD))
{
// We don't know how to handle other cases, yet.
JITDUMP(" bailing; unexpected copy source struct op with side effect %s\n",
GenTree::OpName(copySrc->gtOper));
return nullptr;
}
}
}
// If this was a trial removal, we're done.
if (options == BR_DONT_REMOVE)
{
return copySrc;
}
if (options == BR_DONT_REMOVE_WANT_TYPE_HANDLE)
{
return boxTypeHandle;
}
// Otherwise, proceed with the optimization.
//
// Change the assignment expression to a NOP.
JITDUMP("\nBashing NEWOBJ [%06u] to NOP\n", dspTreeID(asg));
asg->gtBashToNOP();
// Change the copy expression so it preserves key
// source side effects.
JITDUMP("\nBashing COPY [%06u]", dspTreeID(copy));
if (!hasSrcSideEffect)
{
// If there were no copy source side effects just bash
// the copy to a NOP.
copy->gtBashToNOP();
JITDUMP(" to NOP; no source side effects.\n");
}
else if (!isStructCopy)
{
// For scalar types, go ahead and produce the
// value as the copy is fairly cheap and likely
// the optimizer can trim things down to just the
// minimal side effect parts.
copyStmt->SetRootNode(copySrc);
JITDUMP(" to scalar read via [%06u]\n", dspTreeID(copySrc));
}
else
{
// For struct types read the first byte of the
// source struct; there's no need to read the
// entire thing, and no place to put it.
assert(copySrc->OperIs(GT_OBJ, GT_IND, GT_FIELD));
copyStmt->SetRootNode(copySrc);
if (options == BR_REMOVE_AND_NARROW || options == BR_REMOVE_AND_NARROW_WANT_TYPE_HANDLE)
{
JITDUMP(" to read first byte of struct via modified [%06u]\n", dspTreeID(copySrc));
gtChangeOperToNullCheck(copySrc, compCurBB);
}
else
{
JITDUMP(" to read entire struct via modified [%06u]\n", dspTreeID(copySrc));
}
}
if (fgStmtListThreaded)
{
fgSetStmtSeq(asgStmt);
fgSetStmtSeq(copyStmt);
}
// Box effects were successfully optimized.
if (options == BR_REMOVE_AND_NARROW_WANT_TYPE_HANDLE)
{
return boxTypeHandle;
}
else
{
return copySrc;
}
}
//------------------------------------------------------------------------
// gtOptimizeEnumHasFlag: given the operands for a call to Enum.HasFlag,
// try and optimize the call to a simple and/compare tree.
//
// Arguments:
// thisOp - first argument to the call
// flagOp - second argument to the call
//
// Return Value:
// A new cmp/and tree if successful. nullptr on failure.
//
// Notes:
// If successful, may allocate new temps and modify connected
// statements.
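//
// Illustrative sketch (assumed C# shape; names are made up):
//
//   if (e.HasFlag(MyEnum.Bit)) ...
//
// becomes, once both boxes are removed,
//
//   GT_EQ(GT_AND(thisVal, flagVal), flagVal)     // i.e. (e & Bit) == Bit
//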
GenTree* Compiler::gtOptimizeEnumHasFlag(GenTree* thisOp, GenTree* flagOp)
{
JITDUMP("Considering optimizing call to Enum.HasFlag....\n");
// Operands must be boxes
if (!thisOp->IsBoxedValue() || !flagOp->IsBoxedValue())
{
JITDUMP("bailing, need both inputs to be BOXes\n");
return nullptr;
}
// Operands must have same type
bool isExactThis = false;
bool isNonNullThis = false;
CORINFO_CLASS_HANDLE thisHnd = gtGetClassHandle(thisOp, &isExactThis, &isNonNullThis);
if (thisHnd == nullptr)
{
JITDUMP("bailing, can't find type for 'this' operand\n");
return nullptr;
}
// A boxed thisOp should have exact type and non-null instance
assert(isExactThis);
assert(isNonNullThis);
bool isExactFlag = false;
bool isNonNullFlag = false;
CORINFO_CLASS_HANDLE flagHnd = gtGetClassHandle(flagOp, &isExactFlag, &isNonNullFlag);
if (flagHnd == nullptr)
{
JITDUMP("bailing, can't find type for 'flag' operand\n");
return nullptr;
}
// A boxed flagOp should have exact type and non-null instance
assert(isExactFlag);
assert(isNonNullFlag);
if (flagHnd != thisHnd)
{
JITDUMP("bailing, operand types differ\n");
return nullptr;
}
// If we have a shared type instance we can't safely check type
// equality, so bail.
DWORD classAttribs = info.compCompHnd->getClassAttribs(thisHnd);
if (classAttribs & CORINFO_FLG_SHAREDINST)
{
JITDUMP("bailing, have shared instance type\n");
return nullptr;
}
// Simulate removing the box for thisOP. We need to know that it can
// be safely removed before we can optimize.
GenTree* thisVal = gtTryRemoveBoxUpstreamEffects(thisOp, BR_DONT_REMOVE);
if (thisVal == nullptr)
{
// Note we may fail here if the this operand comes from
// a call. We should be able to retry this post-inlining.
JITDUMP("bailing, can't undo box of 'this' operand\n");
return nullptr;
}
// Do likewise with flagOp.
GenTree* flagVal = gtTryRemoveBoxUpstreamEffects(flagOp, BR_DONT_REMOVE);
if (flagVal == nullptr)
{
// Note we may fail here if the flag operand comes from
// a call. We should be able to retry this post-inlining.
JITDUMP("bailing, can't undo box of 'flag' operand\n");
return nullptr;
}
// Only proceed when both box sources have the same actual type.
// (this rules out long/int mismatches)
if (genActualType(thisVal->TypeGet()) != genActualType(flagVal->TypeGet()))
{
JITDUMP("bailing, pre-boxed values have different types\n");
return nullptr;
}
// Yes, both boxes can be cleaned up. Optimize.
JITDUMP("Optimizing call to Enum.HasFlag\n");
// Undo the boxing of the Ops and prepare to operate directly
// on the pre-boxed values.
thisVal = gtTryRemoveBoxUpstreamEffects(thisOp, BR_REMOVE_BUT_NOT_NARROW);
flagVal = gtTryRemoveBoxUpstreamEffects(flagOp, BR_REMOVE_BUT_NOT_NARROW);
// Our trial removals above should guarantee successful removals here.
assert(thisVal != nullptr);
assert(flagVal != nullptr);
assert(genActualType(thisVal->TypeGet()) == genActualType(flagVal->TypeGet()));
// Type to use for optimized check
var_types type = genActualType(thisVal->TypeGet());
// The thisVal and flagVal trees come from earlier statements.
//
// Unless they are invariant values, we need to evaluate them both
// to temps at those points to safely transmit the values here.
//
// Also we need to use the flag twice, so we need two trees for it.
GenTree* thisValOpt = nullptr;
GenTree* flagValOpt = nullptr;
GenTree* flagValOptCopy = nullptr;
if (thisVal->IsIntegralConst())
{
thisValOpt = gtClone(thisVal);
assert(thisValOpt != nullptr);
}
else
{
const unsigned thisTmp = lvaGrabTemp(true DEBUGARG("Enum:HasFlag this temp"));
GenTree* thisAsg = gtNewTempAssign(thisTmp, thisVal);
Statement* thisAsgStmt = thisOp->AsBox()->gtCopyStmtWhenInlinedBoxValue;
thisAsgStmt->SetRootNode(thisAsg);
thisValOpt = gtNewLclvNode(thisTmp, type);
}
if (flagVal->IsIntegralConst())
{
flagValOpt = gtClone(flagVal);
assert(flagValOpt != nullptr);
flagValOptCopy = gtClone(flagVal);
assert(flagValOptCopy != nullptr);
}
else
{
const unsigned flagTmp = lvaGrabTemp(true DEBUGARG("Enum:HasFlag flag temp"));
GenTree* flagAsg = gtNewTempAssign(flagTmp, flagVal);
Statement* flagAsgStmt = flagOp->AsBox()->gtCopyStmtWhenInlinedBoxValue;
flagAsgStmt->SetRootNode(flagAsg);
flagValOpt = gtNewLclvNode(flagTmp, type);
flagValOptCopy = gtNewLclvNode(flagTmp, type);
}
// Turn the call into (thisValTmp & flagTmp) == flagTmp.
GenTree* andTree = gtNewOperNode(GT_AND, type, thisValOpt, flagValOpt);
GenTree* cmpTree = gtNewOperNode(GT_EQ, TYP_INT, andTree, flagValOptCopy);
JITDUMP("Optimized call to Enum.HasFlag\n");
return cmpTree;
}
/*****************************************************************************
*
* Fold the given constant tree.
*/
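// Illustrative folds (sketch only):
//
//   GT_ADD(CNS_INT 2, CNS_INT 3)        ==>  CNS_INT 5
//   GT_CAST<short>(CNS_INT 0x12345)     ==>  CNS_INT 0x2345
//   GT_MUL.ovf(CNS_INT INT32_MAX, 2)    ==>  COMMA(CALL CORINFO_HELP_OVERFLOW, CNS_INT 0)
//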
#ifdef _PREFAST_
#pragma warning(push)
#pragma warning(disable : 21000) // Suppress PREFast warning about overly large function
#endif
GenTree* Compiler::gtFoldExprConst(GenTree* tree)
{
SSIZE_T i1, i2, itemp;
INT64 lval1, lval2, ltemp;
float f1, f2;
double d1, d2;
var_types switchType;
FieldSeqNode* fieldSeq = FieldSeqStore::NotAField(); // default unless we override it when folding
assert(tree->OperIsUnary() || tree->OperIsBinary());
GenTree* op1 = tree->gtGetOp1();
GenTree* op2 = tree->gtGetOp2IfPresent();
if (!opts.OptEnabled(CLFLG_CONSTANTFOLD))
{
return tree;
}
if (tree->OperIs(GT_NOP, GT_ALLOCOBJ, GT_RUNTIMELOOKUP))
{
return tree;
}
// This condition exists to preserve previous behavior.
// TODO-CQ: enable folding for bounds checks nodes.
if (tree->OperIs(GT_BOUNDS_CHECK))
{
return tree;
}
#ifdef FEATURE_SIMD
if (tree->OperIs(GT_SIMD))
{
return tree;
}
#endif // FEATURE_SIMD
#ifdef FEATURE_HW_INTRINSICS
if (tree->OperIs(GT_HWINTRINSIC))
{
return tree;
}
#endif
if (tree->OperIsUnary())
{
assert(op1->OperIsConst());
switch (op1->TypeGet())
{
case TYP_INT:
// Fold constant INT unary operator.
if (!op1->AsIntCon()->ImmedValCanBeFolded(this, tree->OperGet()))
{
return tree;
}
i1 = (INT32)op1->AsIntCon()->IconValue();
// If we fold a unary oper, then the folded constant
// is considered a ConstantIndexField if op1 was one.
if ((op1->AsIntCon()->gtFieldSeq != nullptr) && op1->AsIntCon()->gtFieldSeq->IsConstantIndexFieldSeq())
{
fieldSeq = op1->AsIntCon()->gtFieldSeq;
}
switch (tree->OperGet())
{
case GT_NOT:
i1 = ~i1;
break;
case GT_NEG:
i1 = -i1;
break;
case GT_BSWAP:
i1 = ((i1 >> 24) & 0xFF) | ((i1 >> 8) & 0xFF00) | ((i1 << 8) & 0xFF0000) |
((i1 << 24) & 0xFF000000);
break;
case GT_BSWAP16:
i1 = ((i1 >> 8) & 0xFF) | ((i1 << 8) & 0xFF00);
break;
case GT_CAST:
// assert (genActualType(tree->CastToType()) == tree->TypeGet());
if (tree->gtOverflow() &&
CheckedOps::CastFromIntOverflows((INT32)i1, tree->CastToType(), tree->IsUnsigned()))
{
goto INTEGRAL_OVF;
}
switch (tree->CastToType())
{
case TYP_BYTE:
i1 = INT32(INT8(i1));
goto CNS_INT;
case TYP_SHORT:
i1 = INT32(INT16(i1));
goto CNS_INT;
case TYP_USHORT:
i1 = INT32(UINT16(i1));
goto CNS_INT;
case TYP_BOOL:
case TYP_UBYTE:
i1 = INT32(UINT8(i1));
goto CNS_INT;
case TYP_UINT:
case TYP_INT:
goto CNS_INT;
case TYP_ULONG:
if (tree->IsUnsigned())
{
lval1 = UINT64(UINT32(i1));
}
else
{
lval1 = UINT64(INT32(i1));
}
goto CNS_LONG;
case TYP_LONG:
if (tree->IsUnsigned())
{
lval1 = INT64(UINT32(i1));
}
else
{
lval1 = INT64(INT32(i1));
}
goto CNS_LONG;
case TYP_FLOAT:
if (tree->IsUnsigned())
{
f1 = forceCastToFloat(UINT32(i1));
}
else
{
f1 = forceCastToFloat(INT32(i1));
}
d1 = f1;
goto CNS_DOUBLE;
case TYP_DOUBLE:
if (tree->IsUnsigned())
{
d1 = (double)UINT32(i1);
}
else
{
d1 = (double)INT32(i1);
}
goto CNS_DOUBLE;
default:
assert(!"Bad CastToType() in gtFoldExprConst() for a cast from int");
return tree;
}
default:
return tree;
}
goto CNS_INT;
case TYP_LONG:
// Fold constant LONG unary operator.
if (!op1->AsIntConCommon()->ImmedValCanBeFolded(this, tree->OperGet()))
{
return tree;
}
lval1 = op1->AsIntConCommon()->LngValue();
switch (tree->OperGet())
{
case GT_NOT:
lval1 = ~lval1;
break;
case GT_NEG:
lval1 = -lval1;
break;
case GT_BSWAP:
lval1 = ((lval1 >> 56) & 0xFF) | ((lval1 >> 40) & 0xFF00) | ((lval1 >> 24) & 0xFF0000) |
((lval1 >> 8) & 0xFF000000) | ((lval1 << 8) & 0xFF00000000) |
((lval1 << 24) & 0xFF0000000000) | ((lval1 << 40) & 0xFF000000000000) |
((lval1 << 56) & 0xFF00000000000000);
break;
case GT_CAST:
assert(tree->TypeIs(genActualType(tree->CastToType())));
if (tree->gtOverflow() &&
CheckedOps::CastFromLongOverflows(lval1, tree->CastToType(), tree->IsUnsigned()))
{
goto INTEGRAL_OVF;
}
switch (tree->CastToType())
{
case TYP_BYTE:
i1 = INT32(INT8(lval1));
goto CNS_INT;
case TYP_SHORT:
i1 = INT32(INT16(lval1));
goto CNS_INT;
case TYP_USHORT:
i1 = INT32(UINT16(lval1));
goto CNS_INT;
case TYP_UBYTE:
i1 = INT32(UINT8(lval1));
goto CNS_INT;
case TYP_INT:
i1 = INT32(lval1);
goto CNS_INT;
case TYP_UINT:
i1 = UINT32(lval1);
goto CNS_INT;
case TYP_ULONG:
case TYP_LONG:
goto CNS_LONG;
case TYP_FLOAT:
case TYP_DOUBLE:
if (tree->IsUnsigned() && (lval1 < 0))
{
d1 = FloatingPointUtils::convertUInt64ToDouble((unsigned __int64)lval1);
}
else
{
d1 = (double)lval1;
}
if (tree->CastToType() == TYP_FLOAT)
{
f1 = forceCastToFloat(d1); // truncate precision
d1 = f1;
}
goto CNS_DOUBLE;
default:
assert(!"Bad CastToType() in gtFoldExprConst() for a cast from long");
return tree;
}
default:
return tree;
}
goto CNS_LONG;
case TYP_FLOAT:
case TYP_DOUBLE:
assert(op1->OperIs(GT_CNS_DBL));
// Fold constant DOUBLE unary operator.
d1 = op1->AsDblCon()->gtDconVal;
switch (tree->OperGet())
{
case GT_NEG:
d1 = -d1;
break;
case GT_CAST:
f1 = forceCastToFloat(d1);
if ((op1->TypeIs(TYP_DOUBLE) && CheckedOps::CastFromDoubleOverflows(d1, tree->CastToType())) ||
(op1->TypeIs(TYP_FLOAT) && CheckedOps::CastFromFloatOverflows(f1, tree->CastToType())))
{
// The conversion overflows. The ECMA spec says, in III 3.27, that
// "...if overflow occurs converting a floating point type to an integer, ...,
// the value returned is unspecified." However, it would at least be
// desirable to have the same value returned for casting an overflowing
// constant to an int as would be obtained by passing that constant as
// a parameter and then casting that parameter to an int type.
// Don't fold overflowing conversions, as the value returned by
// JIT's codegen doesn't always match with the C compiler's cast result.
// We want the behavior to be the same with or without folding.
return tree;
}
assert(tree->TypeIs(genActualType(tree->CastToType())));
switch (tree->CastToType())
{
case TYP_BYTE:
i1 = INT32(INT8(d1));
goto CNS_INT;
case TYP_SHORT:
i1 = INT32(INT16(d1));
goto CNS_INT;
case TYP_USHORT:
i1 = INT32(UINT16(d1));
goto CNS_INT;
case TYP_UBYTE:
i1 = INT32(UINT8(d1));
goto CNS_INT;
case TYP_INT:
i1 = INT32(d1);
goto CNS_INT;
case TYP_UINT:
i1 = forceCastToUInt32(d1);
goto CNS_INT;
case TYP_LONG:
lval1 = INT64(d1);
goto CNS_LONG;
case TYP_ULONG:
lval1 = FloatingPointUtils::convertDoubleToUInt64(d1);
goto CNS_LONG;
case TYP_FLOAT:
d1 = forceCastToFloat(d1);
goto CNS_DOUBLE;
case TYP_DOUBLE:
if (op1->TypeIs(TYP_FLOAT))
{
d1 = forceCastToFloat(d1); // Truncate precision.
}
goto CNS_DOUBLE; // Redundant cast.
default:
assert(!"Bad CastToType() in gtFoldExprConst() for a cast from double/float");
break;
}
return tree;
default:
return tree;
}
goto CNS_DOUBLE;
default:
// Not a foldable typ - e.g. RET const.
return tree;
}
}
// We have a binary operator.
assert(tree->OperIsBinary());
assert(op2 != nullptr);
assert(op1->OperIsConst());
assert(op2->OperIsConst());
if (tree->OperIs(GT_COMMA))
{
return op2;
}
switchType = op1->TypeGet();
// Normally we will just switch on op1 types, but for the case where
// only op2 is a GC type and op1 is not a GC type, we use the op2 type.
// This makes us handle this as a case of folding for GC type.
if (varTypeIsGC(op2->gtType) && !varTypeIsGC(op1->gtType))
{
switchType = op2->TypeGet();
}
switch (switchType)
{
// Fold constant REF of BYREF binary operator.
// These can only be comparisons or null pointers.
case TYP_REF:
// String nodes are an RVA at this point.
if (op1->OperIs(GT_CNS_STR) || op2->OperIs(GT_CNS_STR))
{
// Fold "ldstr" ==/!= null.
if (op2->IsIntegralConst(0))
{
if (tree->OperIs(GT_EQ))
{
i1 = 0;
goto FOLD_COND;
}
if (tree->OperIs(GT_NE) || (tree->OperIs(GT_GT) && tree->IsUnsigned()))
{
i1 = 1;
goto FOLD_COND;
}
}
return tree;
}
FALLTHROUGH;
case TYP_BYREF:
i1 = op1->AsIntConCommon()->IconValue();
i2 = op2->AsIntConCommon()->IconValue();
switch (tree->OperGet())
{
case GT_EQ:
i1 = (i1 == i2);
goto FOLD_COND;
case GT_NE:
i1 = (i1 != i2);
goto FOLD_COND;
case GT_ADD:
noway_assert(!tree->TypeIs(TYP_REF));
// We only fold a GT_ADD that involves a null reference.
if ((op1->TypeIs(TYP_REF) && (i1 == 0)) || (op2->TypeIs(TYP_REF) && (i2 == 0)))
{
JITDUMP("\nFolding operator with constant nodes into a constant:\n");
DISPTREE(tree);
// Fold into GT_IND of null byref.
tree->BashToConst(0, TYP_BYREF);
if (vnStore != nullptr)
{
fgValueNumberTreeConst(tree);
}
JITDUMP("\nFolded to null byref:\n");
DISPTREE(tree);
goto DONE;
}
break;
default:
break;
}
return tree;
// Fold constant INT binary operator.
case TYP_INT:
assert(tree->TypeIs(TYP_INT) || varTypeIsGC(tree) || tree->OperIs(GT_MKREFANY));
// No GC pointer types should be folded here...
assert(!varTypeIsGC(op1->TypeGet()) && !varTypeIsGC(op2->TypeGet()));
if (!op1->AsIntConCommon()->ImmedValCanBeFolded(this, tree->OperGet()))
{
return tree;
}
if (!op2->AsIntConCommon()->ImmedValCanBeFolded(this, tree->OperGet()))
{
return tree;
}
i1 = op1->AsIntConCommon()->IconValue();
i2 = op2->AsIntConCommon()->IconValue();
switch (tree->OperGet())
{
case GT_EQ:
i1 = (INT32(i1) == INT32(i2));
break;
case GT_NE:
i1 = (INT32(i1) != INT32(i2));
break;
case GT_LT:
if (tree->IsUnsigned())
{
i1 = (UINT32(i1) < UINT32(i2));
}
else
{
i1 = (INT32(i1) < INT32(i2));
}
break;
case GT_LE:
if (tree->IsUnsigned())
{
i1 = (UINT32(i1) <= UINT32(i2));
}
else
{
i1 = (INT32(i1) <= INT32(i2));
}
break;
case GT_GE:
if (tree->IsUnsigned())
{
i1 = (UINT32(i1) >= UINT32(i2));
}
else
{
i1 = (INT32(i1) >= INT32(i2));
}
break;
case GT_GT:
if (tree->IsUnsigned())
{
i1 = (UINT32(i1) > UINT32(i2));
}
else
{
i1 = (INT32(i1) > INT32(i2));
}
break;
case GT_ADD:
itemp = i1 + i2;
if (tree->gtOverflow() && CheckedOps::AddOverflows(INT32(i1), INT32(i2), tree->IsUnsigned()))
{
goto INTEGRAL_OVF;
}
i1 = itemp;
fieldSeq = GetFieldSeqStore()->Append(op1->AsIntCon()->gtFieldSeq, op2->AsIntCon()->gtFieldSeq);
break;
case GT_SUB:
itemp = i1 - i2;
if (tree->gtOverflow() && CheckedOps::SubOverflows(INT32(i1), INT32(i2), tree->IsUnsigned()))
{
goto INTEGRAL_OVF;
}
i1 = itemp;
break;
case GT_MUL:
itemp = i1 * i2;
if (tree->gtOverflow() && CheckedOps::MulOverflows(INT32(i1), INT32(i2), tree->IsUnsigned()))
{
goto INTEGRAL_OVF;
}
// For the very particular case of the "constant array index" pseudo-field, we
// assume that multiplication is by the field width, and preserves that field.
// This could obviously be made more robust by a more complicated set of annotations...
if ((op1->AsIntCon()->gtFieldSeq != nullptr) &&
op1->AsIntCon()->gtFieldSeq->IsConstantIndexFieldSeq())
{
assert(op2->AsIntCon()->gtFieldSeq == FieldSeqStore::NotAField());
fieldSeq = op1->AsIntCon()->gtFieldSeq;
}
else if ((op2->AsIntCon()->gtFieldSeq != nullptr) &&
op2->AsIntCon()->gtFieldSeq->IsConstantIndexFieldSeq())
{
assert(op1->AsIntCon()->gtFieldSeq == FieldSeqStore::NotAField());
fieldSeq = op2->AsIntCon()->gtFieldSeq;
}
i1 = itemp;
break;
case GT_OR:
i1 |= i2;
break;
case GT_XOR:
i1 ^= i2;
break;
case GT_AND:
i1 &= i2;
break;
case GT_LSH:
i1 <<= (i2 & 0x1f);
break;
case GT_RSH:
i1 >>= (i2 & 0x1f);
break;
case GT_RSZ:
// logical shift -> make it unsigned to not propagate the sign bit.
i1 = UINT32(i1) >> (i2 & 0x1f);
break;
case GT_ROL:
i1 = (i1 << (i2 & 0x1f)) | (UINT32(i1) >> ((32 - i2) & 0x1f));
break;
case GT_ROR:
i1 = (i1 << ((32 - i2) & 0x1f)) | (UINT32(i1) >> (i2 & 0x1f));
break;
// DIV and MOD can throw an exception - if the division is by 0
// or there is overflow - when dividing MIN by -1.
case GT_DIV:
case GT_MOD:
case GT_UDIV:
case GT_UMOD:
if (INT32(i2) == 0)
{
// Division by zero.
// We have to evaluate this expression and throw an exception.
return tree;
}
else if ((INT32(i2) == -1) && (UINT32(i1) == 0x80000000))
{
// Overflow Division.
// We have to evaluate this expression and throw an exception.
return tree;
}
if (tree->OperIs(GT_DIV))
{
i1 = INT32(i1) / INT32(i2);
}
else if (tree->OperIs(GT_MOD))
{
i1 = INT32(i1) % INT32(i2);
}
else if (tree->OperIs(GT_UDIV))
{
i1 = UINT32(i1) / UINT32(i2);
}
else
{
assert(tree->OperIs(GT_UMOD));
i1 = UINT32(i1) % UINT32(i2);
}
break;
default:
return tree;
}
// We get here after folding to a GT_CNS_INT type.
// change the node to the new type / value and make sure the node sizes are OK.
CNS_INT:
FOLD_COND:
JITDUMP("\nFolding operator with constant nodes into a constant:\n");
DISPTREE(tree);
// Also all conditional folding jumps here since the node hanging from
// GT_JTRUE has to be a GT_CNS_INT - value 0 or 1.
// Some operations are performed as 64 bit instead of 32 bit so the upper 32 bits
// need to be discarded. Since constant values are stored as ssize_t and the node
// has TYP_INT the result needs to be sign extended rather than zero extended.
tree->BashToConst(static_cast<int>(i1));
tree->AsIntCon()->gtFieldSeq = fieldSeq;
if (vnStore != nullptr)
{
fgValueNumberTreeConst(tree);
}
JITDUMP("Bashed to int constant:\n");
DISPTREE(tree);
goto DONE;
// Fold constant LONG binary operator.
case TYP_LONG:
// No GC pointer types should be folded here...
assert(!varTypeIsGC(op1->TypeGet()) && !varTypeIsGC(op2->TypeGet()));
// op1 is known to be a TYP_LONG, op2 is normally a TYP_LONG, unless we have a shift operator in which case
// it is a TYP_INT.
assert(op2->TypeIs(TYP_LONG, TYP_INT));
if (!op1->AsIntConCommon()->ImmedValCanBeFolded(this, tree->OperGet()))
{
return tree;
}
if (!op2->AsIntConCommon()->ImmedValCanBeFolded(this, tree->OperGet()))
{
return tree;
}
lval1 = op1->AsIntConCommon()->LngValue();
// For the shift operators we can have a op2 that is a TYP_INT.
// Thus we cannot just use LngValue(), as it will assert on 32 bit if op2 is not GT_CNS_LNG.
lval2 = op2->AsIntConCommon()->IntegralValue();
switch (tree->OperGet())
{
case GT_EQ:
i1 = (lval1 == lval2);
goto FOLD_COND;
case GT_NE:
i1 = (lval1 != lval2);
goto FOLD_COND;
case GT_LT:
if (tree->IsUnsigned())
{
i1 = (UINT64(lval1) < UINT64(lval2));
}
else
{
i1 = (lval1 < lval2);
}
goto FOLD_COND;
case GT_LE:
if (tree->IsUnsigned())
{
i1 = (UINT64(lval1) <= UINT64(lval2));
}
else
{
i1 = (lval1 <= lval2);
}
goto FOLD_COND;
case GT_GE:
if (tree->IsUnsigned())
{
i1 = (UINT64(lval1) >= UINT64(lval2));
}
else
{
i1 = (lval1 >= lval2);
}
goto FOLD_COND;
case GT_GT:
if (tree->IsUnsigned())
{
i1 = (UINT64(lval1) > UINT64(lval2));
}
else
{
i1 = (lval1 > lval2);
}
goto FOLD_COND;
case GT_ADD:
ltemp = lval1 + lval2;
if (tree->gtOverflow() && CheckedOps::AddOverflows(lval1, lval2, tree->IsUnsigned()))
{
goto INTEGRAL_OVF;
}
lval1 = ltemp;
#ifdef TARGET_64BIT
fieldSeq = GetFieldSeqStore()->Append(op1->AsIntCon()->gtFieldSeq, op2->AsIntCon()->gtFieldSeq);
#endif
break;
case GT_SUB:
ltemp = lval1 - lval2;
if (tree->gtOverflow() && CheckedOps::SubOverflows(lval1, lval2, tree->IsUnsigned()))
{
goto INTEGRAL_OVF;
}
lval1 = ltemp;
break;
case GT_MUL:
ltemp = lval1 * lval2;
if (tree->gtOverflow() && CheckedOps::MulOverflows(lval1, lval2, tree->IsUnsigned()))
{
goto INTEGRAL_OVF;
}
lval1 = ltemp;
break;
case GT_OR:
lval1 |= lval2;
break;
case GT_XOR:
lval1 ^= lval2;
break;
case GT_AND:
lval1 &= lval2;
break;
case GT_LSH:
lval1 <<= (lval2 & 0x3f);
break;
case GT_RSH:
lval1 >>= (lval2 & 0x3f);
break;
case GT_RSZ:
// logical shift -> make it unsigned to not propagate the sign bit.
lval1 = UINT64(lval1) >> (lval2 & 0x3f);
break;
case GT_ROL:
lval1 = (lval1 << (lval2 & 0x3f)) | (UINT64(lval1) >> ((64 - lval2) & 0x3f));
break;
case GT_ROR:
lval1 = (lval1 << ((64 - lval2) & 0x3f)) | (UINT64(lval1) >> (lval2 & 0x3f));
break;
// Both DIV and IDIV on x86 raise an exception for min_int (and min_long) / -1. So we preserve
// that behavior here.
case GT_DIV:
if (lval2 == 0)
{
return tree;
}
if ((UINT64(lval1) == UINT64(0x8000000000000000)) && (lval2 == INT64(-1)))
{
return tree;
}
lval1 /= lval2;
break;
case GT_MOD:
if (lval2 == 0)
{
return tree;
}
if ((UINT64(lval1) == UINT64(0x8000000000000000)) && (lval2 == INT64(-1)))
{
return tree;
}
lval1 %= lval2;
break;
case GT_UDIV:
if (lval2 == 0)
{
return tree;
}
if ((UINT64(lval1) == UINT64(0x8000000000000000)) && (lval2 == INT64(-1)))
{
return tree;
}
lval1 = UINT64(lval1) / UINT64(lval2);
break;
case GT_UMOD:
if (lval2 == 0)
{
return tree;
}
if ((UINT64(lval1) == UINT64(0x8000000000000000)) && (lval2 == INT64(-1)))
{
return tree;
}
lval1 = UINT64(lval1) % UINT64(lval2);
break;
default:
return tree;
}
CNS_LONG:
#if !defined(TARGET_64BIT)
if (fieldSeq != FieldSeqStore::NotAField())
{
assert(!"Field sequences on CNS_LNG nodes!?");
return tree;
}
#endif // !defined(TARGET_64BIT)
JITDUMP("\nFolding long operator with constant nodes into a constant:\n");
DISPTREE(tree);
assert((GenTree::s_gtNodeSizes[GT_CNS_NATIVELONG] == TREE_NODE_SZ_SMALL) ||
(tree->gtDebugFlags & GTF_DEBUG_NODE_LARGE));
tree->BashToConst(lval1);
#ifdef TARGET_64BIT
tree->AsIntCon()->gtFieldSeq = fieldSeq;
#endif
if (vnStore != nullptr)
{
fgValueNumberTreeConst(tree);
}
JITDUMP("Bashed to long constant:\n");
DISPTREE(tree);
goto DONE;
// Fold constant FLOAT or DOUBLE binary operator
case TYP_FLOAT:
case TYP_DOUBLE:
if (tree->gtOverflowEx())
{
return tree;
}
assert(op1->OperIs(GT_CNS_DBL));
d1 = op1->AsDblCon()->gtDconVal;
assert(varTypeIsFloating(op2->TypeGet()));
assert(op2->OperIs(GT_CNS_DBL));
d2 = op2->AsDblCon()->gtDconVal;
// Special case - check if we have NaN operands.
// For ordered comparisons (GTF_RELOP_NAN_UN not set) the result is always false - return 0.
// For unordered comparisons (i.e. the GTF_RELOP_NAN_UN flag is set)
// the result is always true - return 1.
if (_isnan(d1) || _isnan(d2))
{
JITDUMP("Double operator(s) is NaN\n");
if (tree->OperIsCompare())
{
if (tree->gtFlags & GTF_RELOP_NAN_UN)
{
// Unordered comparison with NaN always succeeds.
i1 = 1;
goto FOLD_COND;
}
else
{
// Normal comparison with NaN always fails.
i1 = 0;
goto FOLD_COND;
}
}
}
switch (tree->OperGet())
{
case GT_EQ:
i1 = (d1 == d2);
goto FOLD_COND;
case GT_NE:
i1 = (d1 != d2);
goto FOLD_COND;
case GT_LT:
i1 = (d1 < d2);
goto FOLD_COND;
case GT_LE:
i1 = (d1 <= d2);
goto FOLD_COND;
case GT_GE:
i1 = (d1 >= d2);
goto FOLD_COND;
case GT_GT:
i1 = (d1 > d2);
goto FOLD_COND;
// Floating point arithmetic should be done in declared
// precision while doing constant folding. For this reason, even though TYP_FLOAT
// constants are stored as double constants, the double constants should be
// converted to float when performing float arithmetic. Here is an example case
// where performing arithmetic in double precision would lead to incorrect
// results.
//
// Example:
// float a = float.MaxValue;
// float b = a*a; This will produce +inf in single precision and 1.1579207543382391e+077 in double
// precision.
// float c = b/b; This will produce NaN in single precision and 1 in double precision.
case GT_ADD:
if (op1->TypeIs(TYP_FLOAT))
{
f1 = forceCastToFloat(d1);
f2 = forceCastToFloat(d2);
d1 = forceCastToFloat(f1 + f2);
}
else
{
d1 += d2;
}
break;
case GT_SUB:
if (op1->TypeIs(TYP_FLOAT))
{
f1 = forceCastToFloat(d1);
f2 = forceCastToFloat(d2);
d1 = forceCastToFloat(f1 - f2);
}
else
{
d1 -= d2;
}
break;
case GT_MUL:
if (op1->TypeIs(TYP_FLOAT))
{
f1 = forceCastToFloat(d1);
f2 = forceCastToFloat(d2);
d1 = forceCastToFloat(f1 * f2);
}
else
{
d1 *= d2;
}
break;
case GT_DIV:
// We do not fold division by zero, even for floating point.
// This is because the result will be platform-dependent for an expression like 0d / 0d.
if (d2 == 0)
{
return tree;
}
if (op1->TypeIs(TYP_FLOAT))
{
f1 = forceCastToFloat(d1);
f2 = forceCastToFloat(d2);
d1 = forceCastToFloat(f1 / f2);
}
else
{
d1 /= d2;
}
break;
default:
return tree;
}
CNS_DOUBLE:
JITDUMP("\nFolding fp operator with constant nodes into a fp constant:\n");
DISPTREE(tree);
assert((GenTree::s_gtNodeSizes[GT_CNS_DBL] == TREE_NODE_SZ_SMALL) ||
(tree->gtDebugFlags & GTF_DEBUG_NODE_LARGE));
tree->BashToConst(d1, tree->TypeGet());
if (vnStore != nullptr)
{
fgValueNumberTreeConst(tree);
}
JITDUMP("Bashed to fp constant:\n");
DISPTREE(tree);
goto DONE;
default:
// Not a foldable type.
return tree;
}
DONE:
// Make sure no side effect flags are set on this constant node.
tree->gtFlags &= ~GTF_ALL_EFFECT;
return tree;
INTEGRAL_OVF:
// This operation is going to cause an overflow exception. Morph into
// an overflow helper. Put a dummy constant value for code generation.
//
// We could remove all subsequent trees in the current basic block,
// unless this node is a child of GT_COLON
//
// NOTE: Since the folded value is not constant we should not change the
// "tree" node - otherwise we confuse the logic that checks if the folding
// was successful - instead use one of the operands, e.g. op1.
// Don't fold overflow operations if not global morph phase.
// The reason for this is that this optimization is replacing a gentree node
// with another new gentree node. Say a GT_CALL(arglist) has one 'arg'
// involving overflow arithmetic. During assertion prop, it is possible
// that the 'arg' could be constant folded and the result could lead to an
// overflow. In such a case 'arg' will get replaced with GT_COMMA node
// but fgMorphArgs() - see the logic around "if(lateArgsComputed)" - doesn't
// update args table. For this reason this optimization is enabled only
// for global morphing phase.
//
// TODO-CQ: Once fgMorphArgs() is fixed this restriction could be removed.
if (!fgGlobalMorph)
{
assert(tree->gtOverflow());
return tree;
}
var_types type = genActualType(tree->TypeGet());
op1 = type == TYP_LONG ? gtNewLconNode(0) : gtNewIconNode(0);
if (vnStore != nullptr)
{
op1->gtVNPair.SetBoth(vnStore->VNZeroForType(type));
}
JITDUMP("\nFolding binary operator with constant nodes into a comma throw:\n");
DISPTREE(tree);
// We will change the cast to a GT_COMMA and attach the exception helper as AsOp()->gtOp1.
// The constant expression zero becomes op2.
assert(tree->gtOverflow());
assert(tree->OperIs(GT_ADD, GT_SUB, GT_CAST, GT_MUL));
assert(op1 != nullptr);
op2 = op1;
op1 = gtNewHelperCallNode(CORINFO_HELP_OVERFLOW, TYP_VOID, gtNewCallArgs(gtNewIconNode(compCurBB->bbTryIndex)));
// op1 is a call to the JIT helper that throws an Overflow exception.
// Attach the ExcSet for VNF_OverflowExc(Void) to this call.
if (vnStore != nullptr)
{
op1->gtVNPair = vnStore->VNPWithExc(ValueNumPair(ValueNumStore::VNForVoid(), ValueNumStore::VNForVoid()),
vnStore->VNPExcSetSingleton(vnStore->VNPairForFunc(TYP_REF, VNF_OverflowExc,
vnStore->VNPForVoid())));
}
tree = gtNewOperNode(GT_COMMA, tree->TypeGet(), op1, op2);
return tree;
}
#ifdef _PREFAST_
#pragma warning(pop)
#endif
//------------------------------------------------------------------------
// gtNewTempAssign: Create an assignment of the given value to a temp.
//
// Arguments:
// tmp - local number for a compiler temp
// val - value to assign to the temp
// pAfterStmt - statement to insert any additional statements after
// ilOffset - il offset for new statements
// block - block to insert any additional statements in
//
// Return Value:
// Normally a new assignment node.
// However may return a nop node if val is simply a reference to the temp.
//
// Notes:
// Self-assignments may be represented via NOPs.
//
// May update the type of the temp, if it was previously unknown.
//
// May set compFloatingPointUsed.
GenTree* Compiler::gtNewTempAssign(
unsigned tmp, GenTree* val, Statement** pAfterStmt, const DebugInfo& di, BasicBlock* block)
{
// Self-assignment is a nop.
if (val->OperGet() == GT_LCL_VAR && val->AsLclVarCommon()->GetLclNum() == tmp)
{
return gtNewNothingNode();
}
LclVarDsc* varDsc = lvaGetDesc(tmp);
if (varDsc->TypeGet() == TYP_I_IMPL && val->TypeGet() == TYP_BYREF)
{
impBashVarAddrsToI(val);
}
var_types valTyp = val->TypeGet();
if (val->OperGet() == GT_LCL_VAR && lvaTable[val->AsLclVar()->GetLclNum()].lvNormalizeOnLoad())
{
valTyp = lvaGetRealType(val->AsLclVar()->GetLclNum());
val->gtType = valTyp;
}
var_types dstTyp = varDsc->TypeGet();
/* If the variable's lvType is not yet set then set it here */
if (dstTyp == TYP_UNDEF)
{
varDsc->lvType = dstTyp = genActualType(valTyp);
#if FEATURE_SIMD
if (varTypeIsSIMD(dstTyp))
{
varDsc->lvSIMDType = 1;
}
#endif
}
#ifdef DEBUG
// Make sure the actual types match.
if (genActualType(valTyp) != genActualType(dstTyp))
{
// Plus some other exceptions that are apparently legal:
// 1) TYP_REF or BYREF = TYP_I_IMPL
bool ok = false;
if (varTypeIsGC(dstTyp) && (valTyp == TYP_I_IMPL))
{
ok = true;
}
// 2) TYP_DOUBLE = TYP_FLOAT or TYP_FLOAT = TYP_DOUBLE
else if (varTypeIsFloating(dstTyp) && varTypeIsFloating(valTyp))
{
ok = true;
}
// 3) TYP_BYREF = TYP_REF when object stack allocation is enabled
else if (JitConfig.JitObjectStackAllocation() && (dstTyp == TYP_BYREF) && (valTyp == TYP_REF))
{
ok = true;
}
else if (!varTypeIsGC(dstTyp) && (genTypeSize(valTyp) == genTypeSize(dstTyp)))
{
// We can have assignments that require a change of register file, e.g. for arguments
// and call returns. Lowering and Codegen will handle these.
ok = true;
}
else if ((dstTyp == TYP_STRUCT) && (valTyp == TYP_INT))
{
// It could come from `ASG(struct, 0)` that was propagated to `RETURN struct(0)`,
// and now it is merging to a struct again.
assert(tmp == genReturnLocal);
ok = true;
}
else if (varTypeIsSIMD(dstTyp) && (valTyp == TYP_STRUCT))
{
assert(val->IsCall());
ok = true;
}
if (!ok)
{
gtDispTree(val);
assert(!"Incompatible types for gtNewTempAssign");
}
}
#endif
// Added this noway_assert for runtime\issue 44895, to protect against silent bad codegen
//
if ((dstTyp == TYP_STRUCT) && (valTyp == TYP_REF))
{
noway_assert(!"Incompatible types for gtNewTempAssign");
}
// Floating Point assignments can be created during inlining
// see "Zero init inlinee locals:" in fgInlinePrependStatements
// thus we may need to set compFloatingPointUsed to true here.
//
if (varTypeUsesFloatReg(dstTyp) && (compFloatingPointUsed == false))
{
compFloatingPointUsed = true;
}
/* Create the assignment node */
GenTree* asg;
GenTree* dest = gtNewLclvNode(tmp, dstTyp);
dest->gtFlags |= GTF_VAR_DEF;
// With first-class structs, we should be propagating the class handle on all non-primitive
// struct types. We don't have a convenient way to do that for all SIMD temps, since some
// internal trees use SIMD types that are not used by the input IL. In this case, we allow
// a null type handle and derive the necessary information about the type from its varType.
CORINFO_CLASS_HANDLE valStructHnd = gtGetStructHandleIfPresent(val);
if (varTypeIsStruct(varDsc) && (valStructHnd == NO_CLASS_HANDLE) && !varTypeIsSIMD(valTyp))
{
// There are 2 special cases:
// 1. we have lost classHandle from a FIELD node because the parent struct has overlapping fields,
// the field was transformed into an IND or a GT_LCL_FLD;
// 2. we are propagating `ASG(struct V01, 0)` to `RETURN(struct V01)`; a `CNS_INT` doesn't have a `structHnd`;
// in these cases, we can use the type of the merge return for the assignment.
assert(val->gtEffectiveVal(true)->OperIs(GT_IND, GT_LCL_FLD, GT_CNS_INT));
assert(tmp == genReturnLocal);
valStructHnd = lvaGetStruct(genReturnLocal);
assert(valStructHnd != NO_CLASS_HANDLE);
}
if ((valStructHnd != NO_CLASS_HANDLE) && val->IsConstInitVal())
{
asg = gtNewAssignNode(dest, val);
}
else if (varTypeIsStruct(varDsc) && ((valStructHnd != NO_CLASS_HANDLE) || varTypeIsSIMD(valTyp)))
{
// The struct value may be a child of a GT_COMMA due to explicit null checks of indirs/fields.
GenTree* valx = val->gtEffectiveVal(/*commaOnly*/ true);
if (valStructHnd != NO_CLASS_HANDLE)
{
lvaSetStruct(tmp, valStructHnd, false);
}
else
{
assert(valx->gtOper != GT_OBJ);
}
dest->gtFlags |= GTF_DONT_CSE;
valx->gtFlags |= GTF_DONT_CSE;
asg = impAssignStruct(dest, val, valStructHnd, (unsigned)CHECK_SPILL_NONE, pAfterStmt, di, block);
}
else
{
// We may have a scalar type variable assigned a struct value, e.g. a 'genReturnLocal'
// when the ABI calls for returning a struct as a primitive type.
// TODO-1stClassStructs: When we stop "lying" about the types for ABI purposes, the
// 'genReturnLocal' should be the original struct type.
assert(!varTypeIsStruct(valTyp) || ((valStructHnd != NO_CLASS_HANDLE) &&
(typGetObjLayout(valStructHnd)->GetSize() == genTypeSize(varDsc))));
asg = gtNewAssignNode(dest, val);
}
if (compRationalIRForm)
{
Rationalizer::RewriteAssignmentIntoStoreLcl(asg->AsOp());
}
return asg;
}
/*****************************************************************************
*
* Create a helper call to access a COM field (iff 'assg' is non-zero this is
* an assignment and 'assg' is the new value).
*/
GenTree* Compiler::gtNewRefCOMfield(GenTree* objPtr,
CORINFO_RESOLVED_TOKEN* pResolvedToken,
CORINFO_ACCESS_FLAGS access,
CORINFO_FIELD_INFO* pFieldInfo,
var_types lclTyp,
CORINFO_CLASS_HANDLE structType,
GenTree* assg)
{
assert(pFieldInfo->fieldAccessor == CORINFO_FIELD_INSTANCE_HELPER ||
pFieldInfo->fieldAccessor == CORINFO_FIELD_INSTANCE_ADDR_HELPER ||
pFieldInfo->fieldAccessor == CORINFO_FIELD_STATIC_ADDR_HELPER);
/* If we can't access it directly, we need to call a helper function */
GenTreeCall::Use* args = nullptr;
var_types helperType = TYP_BYREF;
if (pFieldInfo->fieldAccessor == CORINFO_FIELD_INSTANCE_HELPER)
{
if (access & CORINFO_ACCESS_SET)
{
assert(assg != nullptr);
// helper needs pointer to struct, not struct itself
if (pFieldInfo->helper == CORINFO_HELP_SETFIELDSTRUCT)
{
assert(structType != nullptr);
assg = impGetStructAddr(assg, structType, (unsigned)CHECK_SPILL_ALL, true);
}
else if (lclTyp == TYP_DOUBLE && assg->TypeGet() == TYP_FLOAT)
{
assg = gtNewCastNode(TYP_DOUBLE, assg, false, TYP_DOUBLE);
}
else if (lclTyp == TYP_FLOAT && assg->TypeGet() == TYP_DOUBLE)
{
assg = gtNewCastNode(TYP_FLOAT, assg, false, TYP_FLOAT);
}
args = gtNewCallArgs(assg);
helperType = TYP_VOID;
}
else if (access & CORINFO_ACCESS_GET)
{
helperType = lclTyp;
// The calling convention for the helper does not take into
// account optimization of primitive structs.
if ((pFieldInfo->helper == CORINFO_HELP_GETFIELDSTRUCT) && !varTypeIsStruct(lclTyp))
{
helperType = TYP_STRUCT;
}
}
}
if (pFieldInfo->helper == CORINFO_HELP_GETFIELDSTRUCT || pFieldInfo->helper == CORINFO_HELP_SETFIELDSTRUCT)
{
assert(pFieldInfo->structType != nullptr);
args = gtPrependNewCallArg(gtNewIconEmbClsHndNode(pFieldInfo->structType), args);
}
GenTree* fieldHnd = impTokenToHandle(pResolvedToken);
if (fieldHnd == nullptr)
{ // compDonotInline()
return nullptr;
}
args = gtPrependNewCallArg(fieldHnd, args);
// If it's a static field, we shouldn't have an object node
// If it's an instance field, we have an object node
assert((pFieldInfo->fieldAccessor != CORINFO_FIELD_STATIC_ADDR_HELPER) ^ (objPtr == nullptr));
if (objPtr != nullptr)
{
args = gtPrependNewCallArg(objPtr, args);
}
GenTreeCall* call = gtNewHelperCallNode(pFieldInfo->helper, genActualType(helperType), args);
#if FEATURE_MULTIREG_RET
if (varTypeIsStruct(call))
{
call->InitializeStructReturnType(this, structType, call->GetUnmanagedCallConv());
}
#endif // FEATURE_MULTIREG_RET
GenTree* result = call;
if (pFieldInfo->fieldAccessor == CORINFO_FIELD_INSTANCE_HELPER)
{
if (access & CORINFO_ACCESS_GET)
{
if (pFieldInfo->helper == CORINFO_HELP_GETFIELDSTRUCT)
{
if (!varTypeIsStruct(lclTyp))
{
// get the result as primitive type
result = impGetStructAddr(result, structType, (unsigned)CHECK_SPILL_ALL, true);
result = gtNewOperNode(GT_IND, lclTyp, result);
}
}
else if (varTypeIsIntegral(lclTyp) && genTypeSize(lclTyp) < genTypeSize(TYP_INT))
{
// The helper does not extend the small return types.
result = gtNewCastNode(genActualType(lclTyp), result, false, lclTyp);
}
}
}
else
{
// OK, now do the indirection
if (access & CORINFO_ACCESS_GET)
{
if (varTypeIsStruct(lclTyp))
{
result = gtNewObjNode(structType, result);
}
else
{
result = gtNewOperNode(GT_IND, lclTyp, result);
}
result->gtFlags |= (GTF_EXCEPT | GTF_GLOB_REF);
}
else if (access & CORINFO_ACCESS_SET)
{
if (varTypeIsStruct(lclTyp))
{
result = impAssignStructPtr(result, assg, structType, (unsigned)CHECK_SPILL_ALL);
}
else
{
result = gtNewOperNode(GT_IND, lclTyp, result);
result->gtFlags |= (GTF_EXCEPT | GTF_GLOB_REF | GTF_IND_TGTANYWHERE);
result = gtNewAssignNode(result, assg);
}
}
}
return result;
}
/*****************************************************************************
*
* Return true if the given node (excluding its child trees) contains side effects.
* Note that it does not recurse, and children need to be handled separately.
* It may return false even if the node has GTF_SIDE_EFFECT (because of its children).
*
* Similar to OperMayThrow() (but handles GT_CALLs specially), but considers
* assignments too.
*/
bool Compiler::gtNodeHasSideEffects(GenTree* tree, GenTreeFlags flags)
{
if (flags & GTF_ASG)
{
// TODO-Bug: This only checks for GT_ASG/GT_STORE_DYN_BLK but according to OperRequiresAsgFlag
// there are many more opers that are considered to have an assignment side effect: atomic ops
// (GT_CMPXCHG & co.), GT_MEMORYBARRIER (not classified as an atomic op) and HW intrinsic
// memory stores. Atomic ops have special handling in gtExtractSideEffList but the others
// will simply be dropped if they are ever subject to an "extract side effects" operation.
// It is possible that the reason no bugs have yet been observed in this area is that the
// other nodes are likely to always be tree roots.
if (tree->OperIs(GT_ASG, GT_STORE_DYN_BLK))
{
return true;
}
}
// Are there only GTF_CALL side effects remaining? (and no other side effect kinds)
if (flags & GTF_CALL)
{
if (tree->OperGet() == GT_CALL)
{
GenTreeCall* const call = tree->AsCall();
const bool ignoreExceptions = (flags & GTF_EXCEPT) == 0;
const bool ignoreCctors = (flags & GTF_IS_IN_CSE) != 0; // We can CSE helpers that run cctors.
if (!call->HasSideEffects(this, ignoreExceptions, ignoreCctors))
{
// If this call is otherwise side effect free, check its arguments.
for (GenTreeCall::Use& use : call->Args())
{
if (gtTreeHasSideEffects(use.GetNode(), flags))
{
return true;
}
}
// I'm a little worried that args that assign to temps that are late args will look like
// side effects...but better to be conservative for now.
for (GenTreeCall::Use& use : call->LateArgs())
{
if (gtTreeHasSideEffects(use.GetNode(), flags))
{
return true;
}
}
// Otherwise:
return false;
}
// Otherwise the GT_CALL is considered to have side-effects.
return true;
}
}
if (flags & GTF_EXCEPT)
{
if (tree->OperMayThrow(this))
{
return true;
}
}
// Expressions declared as CSE by (e.g.) hoisting code are considered to have relevant side
// effects (if we care about GTF_MAKE_CSE).
if ((flags & GTF_MAKE_CSE) && (tree->gtFlags & GTF_MAKE_CSE))
{
return true;
}
return false;
}
/*****************************************************************************
* Returns true if the expr tree has any side effects.
*/
bool Compiler::gtTreeHasSideEffects(GenTree* tree, GenTreeFlags flags /* = GTF_SIDE_EFFECT*/)
{
// These are the side effect flags that we care about for this tree
GenTreeFlags sideEffectFlags = tree->gtFlags & flags;
// Does this tree have any Side-effect flags set that we care about?
if (sideEffectFlags == 0)
{
// no it doesn't..
return false;
}
if (sideEffectFlags == GTF_CALL)
{
if (tree->OperGet() == GT_CALL)
{
// Generally all trees that contain GT_CALL nodes are considered to have side-effects.
//
if (tree->AsCall()->gtCallType == CT_HELPER)
{
// If this node is a helper call we may not care about the side-effects.
// Note that gtNodeHasSideEffects checks the side effects of the helper itself
// as well as the side effects of its arguments.
return gtNodeHasSideEffects(tree, flags);
}
}
else if (tree->OperGet() == GT_INTRINSIC)
{
if (gtNodeHasSideEffects(tree, flags))
{
return true;
}
if (gtNodeHasSideEffects(tree->AsOp()->gtOp1, flags))
{
return true;
}
if ((tree->AsOp()->gtOp2 != nullptr) && gtNodeHasSideEffects(tree->AsOp()->gtOp2, flags))
{
return true;
}
return false;
}
}
return true;
}
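//------------------------------------------------------------------------
// gtBuildCommaList: Prepend an expression to a comma-based side effect list.
//
// Arguments:
//    list - the current list, or nullptr if the list has not been started yet
//    expr - the expression to prepend
//
// Return Value:
//    'expr' itself if 'list' is null; otherwise a new GT_COMMA(expr, list)
//    node, so that repeated calls build a chain such as COMMA(A, COMMA(B, C)).
//    Effect flags and (when available) value numbers are propagated to the
//    new comma node.
//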
GenTree* Compiler::gtBuildCommaList(GenTree* list, GenTree* expr)
{
// 'list' starts off as null,
// and when it is null we haven't started the list yet.
//
if (list != nullptr)
{
// Create a GT_COMMA that appends 'expr' in front of the remaining set of expressions in (*list)
GenTree* result = gtNewOperNode(GT_COMMA, TYP_VOID, expr, list);
// Set the flags in the comma node
result->gtFlags |= (list->gtFlags & GTF_ALL_EFFECT);
result->gtFlags |= (expr->gtFlags & GTF_ALL_EFFECT);
DBEXEC(fgGlobalMorph, result->gtDebugFlags |= GTF_DEBUG_NODE_MORPHED);
// 'list' and 'expr' should have valuenumbers defined for both or for neither one (unless we are remorphing,
// in which case a prior transform involving either node may have discarded or otherwise invalidated the value
// numbers).
assert((list->gtVNPair.BothDefined() == expr->gtVNPair.BothDefined()) || !fgGlobalMorph);
// Set the ValueNumber 'gtVNPair' for the new GT_COMMA node
//
if (list->gtVNPair.BothDefined() && expr->gtVNPair.BothDefined())
{
// The result of a GT_COMMA node is op2, the normal value number is op2vnp
// But we also need to include the union of side effects from op1 and op2.
// we compute this value into exceptions_vnp.
ValueNumPair op1vnp;
ValueNumPair op1Xvnp = ValueNumStore::VNPForEmptyExcSet();
ValueNumPair op2vnp;
ValueNumPair op2Xvnp = ValueNumStore::VNPForEmptyExcSet();
vnStore->VNPUnpackExc(expr->gtVNPair, &op1vnp, &op1Xvnp);
vnStore->VNPUnpackExc(list->gtVNPair, &op2vnp, &op2Xvnp);
ValueNumPair exceptions_vnp = ValueNumStore::VNPForEmptyExcSet();
exceptions_vnp = vnStore->VNPExcSetUnion(exceptions_vnp, op1Xvnp);
exceptions_vnp = vnStore->VNPExcSetUnion(exceptions_vnp, op2Xvnp);
result->gtVNPair = vnStore->VNPWithExc(op2vnp, exceptions_vnp);
}
return result;
}
else
{
// The 'expr' will start the list of expressions
return expr;
}
}
//------------------------------------------------------------------------
// gtExtractSideEffList: Extracts side effects from the given expression.
//
// Arguments:
// expr - the expression tree to extract side effects from
// pList - pointer to a (possibly null) GT_COMMA list that
// will contain the extracted side effects
// flags - side effect flags to be considered
// ignoreRoot - ignore side effects on the expression root node
//
// Notes:
// Side effects are prepended to the GT_COMMA list such that op1 of
// each comma node holds the side effect tree and op2 points to the
// next comma node. The original side effect execution order is preserved.
//
void Compiler::gtExtractSideEffList(GenTree* expr,
GenTree** pList,
GenTreeFlags flags /* = GTF_SIDE_EFFECT*/,
bool ignoreRoot /* = false */)
{
class SideEffectExtractor final : public GenTreeVisitor<SideEffectExtractor>
{
public:
const GenTreeFlags m_flags;
ArrayStack<GenTree*> m_sideEffects;
enum
{
DoPreOrder = true,
UseExecutionOrder = true
};
SideEffectExtractor(Compiler* compiler, GenTreeFlags flags)
: GenTreeVisitor(compiler), m_flags(flags), m_sideEffects(compiler->getAllocator(CMK_SideEffects))
{
}
fgWalkResult PreOrderVisit(GenTree** use, GenTree* user)
{
GenTree* node = *use;
bool treeHasSideEffects = m_compiler->gtTreeHasSideEffects(node, m_flags);
if (treeHasSideEffects)
{
if (m_compiler->gtNodeHasSideEffects(node, m_flags))
{
PushSideEffects(node);
if (node->OperIsBlk() && !node->OperIsStoreBlk())
{
JITDUMP("Replace an unused OBJ/BLK node [%06d] with a NULLCHECK\n", dspTreeID(node));
m_compiler->gtChangeOperToNullCheck(node, m_compiler->compCurBB);
}
return Compiler::WALK_SKIP_SUBTREES;
}
// TODO-Cleanup: These have GTF_ASG set but for some reason gtNodeHasSideEffects ignores
// them. See the related gtNodeHasSideEffects comment as well.
// Also, these nodes must always be preserved, no matter what side effect flags are passed
// in. But then it should never be the case that gtExtractSideEffList gets called without
// specifying GTF_ASG so there doesn't seem to be any reason to be inconsistent with
// gtNodeHasSideEffects and make this check unconditionally.
if (node->OperIsAtomicOp())
{
PushSideEffects(node);
return Compiler::WALK_SKIP_SUBTREES;
}
if ((m_flags & GTF_EXCEPT) != 0)
{
// Special case - GT_ADDR of GT_IND nodes of TYP_STRUCT have to be kept together.
if (node->OperIs(GT_ADDR) && node->gtGetOp1()->OperIsIndir() &&
(node->gtGetOp1()->TypeGet() == TYP_STRUCT))
{
JITDUMP("Keep the GT_ADDR and GT_IND together:\n");
PushSideEffects(node);
return Compiler::WALK_SKIP_SUBTREES;
}
}
// Generally all GT_CALL nodes are considered to have side-effects.
// So if we get here it must be a helper call that we decided it does
// not have side effects that we needed to keep.
assert(!node->OperIs(GT_CALL) || (node->AsCall()->gtCallType == CT_HELPER));
}
if ((m_flags & GTF_IS_IN_CSE) != 0)
{
// If we're doing CSE then we also need to unmark CSE nodes. This will fail for CSE defs,
// those need to be extracted as if they're side effects.
if (!UnmarkCSE(node))
{
PushSideEffects(node);
return Compiler::WALK_SKIP_SUBTREES;
}
// The existence of CSE defs and uses is not propagated up the tree like side
// effects are. We need to continue visiting the tree as if it has side effects.
treeHasSideEffects = true;
}
return treeHasSideEffects ? Compiler::WALK_CONTINUE : Compiler::WALK_SKIP_SUBTREES;
}
private:
bool UnmarkCSE(GenTree* node)
{
assert(m_compiler->optValnumCSE_phase);
if (m_compiler->optUnmarkCSE(node))
{
// The call to optUnmarkCSE(node) should have cleared any CSE info.
assert(!IS_CSE_INDEX(node->gtCSEnum));
return true;
}
else
{
assert(IS_CSE_DEF(node->gtCSEnum));
#ifdef DEBUG
if (m_compiler->verbose)
{
printf("Preserving the CSE def #%02d at ", GET_CSE_INDEX(node->gtCSEnum));
m_compiler->printTreeID(node);
}
#endif
return false;
}
}
void PushSideEffects(GenTree* node)
{
// The extracted side effect will no longer be an argument, so unmark it.
// This is safe to do because the side effects will be visited in pre-order,
// aborting as soon as any tree is extracted. Thus if an argument for a call
// is being extracted, it is guaranteed that the call itself will not be.
node->gtFlags &= ~GTF_LATE_ARG;
m_sideEffects.Push(node);
}
};
SideEffectExtractor extractor(this, flags);
if (ignoreRoot)
{
for (GenTree* op : expr->Operands())
{
extractor.WalkTree(&op, nullptr);
}
}
else
{
extractor.WalkTree(&expr, nullptr);
}
GenTree* list = *pList;
// The extractor returns side effects in execution order but gtBuildCommaList prepends
// to the comma-based side effect list so we have to build the list in reverse order.
// This is also why the list cannot be built while traversing the tree.
// The number of side effects is usually small (<= 4), less than the ArrayStack's
// built-in size, so memory allocation is avoided.
while (!extractor.m_sideEffects.Empty())
{
list = gtBuildCommaList(list, extractor.m_sideEffects.Pop());
}
*pList = list;
}
/*****************************************************************************
*
* For debugging only - displays a tree node list and makes sure all the
* links are correctly set.
*/
#ifdef DEBUG
void dispNodeList(GenTree* list, bool verbose)
{
GenTree* last = nullptr;
GenTree* next;
if (!list)
{
return;
}
for (;;)
{
next = list->gtNext;
if (verbose)
{
printf("%08X -> %08X -> %08X\n", last, list, next);
}
assert(!last || last->gtNext == list);
assert(next == nullptr || next->gtPrev == list);
if (!next)
{
break;
}
last = list;
list = next;
}
printf(""); // null string means flush
}
#endif
/*****************************************************************************
* Callback to mark the nodes of a qmark-colon subtree that are conditionally
* executed.
*/
/* static */
Compiler::fgWalkResult Compiler::gtMarkColonCond(GenTree** pTree, fgWalkData* data)
{
assert(data->pCallbackData == nullptr);
(*pTree)->gtFlags |= GTF_COLON_COND;
return WALK_CONTINUE;
}
/*****************************************************************************
* Callback to clear the conditionally executed flags of nodes that will no
* longer be conditionally executed. Note that when we find another colon we must
* stop, as the nodes below this one WILL be conditionally executed. This callback
* is called when folding a qmark condition (i.e. the condition is constant).
*/
/* static */
Compiler::fgWalkResult Compiler::gtClearColonCond(GenTree** pTree, fgWalkData* data)
{
GenTree* tree = *pTree;
assert(data->pCallbackData == nullptr);
if (tree->OperGet() == GT_COLON)
{
// Nodes below this will be conditionally executed.
return WALK_SKIP_SUBTREES;
}
tree->gtFlags &= ~GTF_COLON_COND;
return WALK_CONTINUE;
}
/*****************************************************************************
*
* Callback used by the tree walker to implement fgFindLink()
*/
static Compiler::fgWalkResult gtFindLinkCB(GenTree** pTree, Compiler::fgWalkData* cbData)
{
Compiler::FindLinkData* data = (Compiler::FindLinkData*)cbData->pCallbackData;
if (*pTree == data->nodeToFind)
{
data->result = pTree;
data->parent = cbData->parent;
return Compiler::WALK_ABORT;
}
return Compiler::WALK_CONTINUE;
}
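//------------------------------------------------------------------------
// gtFindLink: Search a statement's tree for the use edge of a given node.
//
// Arguments:
//    stmt - the statement whose tree is walked
//    node - the node to find
//
// Return Value:
//    A FindLinkData holding the address of the use edge and the parent node
//    if the node was found; otherwise a FindLinkData with null 'result' and
//    'parent' fields.
//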
Compiler::FindLinkData Compiler::gtFindLink(Statement* stmt, GenTree* node)
{
FindLinkData data = {node, nullptr, nullptr};
fgWalkResult result = fgWalkTreePre(stmt->GetRootNodePointer(), gtFindLinkCB, &data);
if (result == WALK_ABORT)
{
assert(data.nodeToFind == *data.result);
return data;
}
else
{
return {node, nullptr, nullptr};
}
}
/*****************************************************************************
*
* Callback that checks if a tree node has oper type GT_CATCH_ARG
*/
static Compiler::fgWalkResult gtFindCatchArg(GenTree** pTree, Compiler::fgWalkData* /* data */)
{
return ((*pTree)->OperGet() == GT_CATCH_ARG) ? Compiler::WALK_ABORT : Compiler::WALK_CONTINUE;
}
/*****************************************************************************/
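// gtHasCatchArg: Check whether the given tree contains a GT_CATCH_ARG node.
// The walk is only performed when the tree has GTF_ORDER_SIDEEFF set, which
// is used here as a cheap pre-filter before doing the full tree walk.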
bool Compiler::gtHasCatchArg(GenTree* tree)
{
if (((tree->gtFlags & GTF_ORDER_SIDEEFF) != 0) && (fgWalkTreePre(&tree, gtFindCatchArg) == WALK_ABORT))
{
return true;
}
return false;
}
//------------------------------------------------------------------------
// gtHasCallOnStack:
//
// Arguments:
// parentStack: a context (stack of parent nodes)
//
// Return Value:
// returns true if any of the parent nodes are a GT_CALL
//
// Assumptions:
// We have a stack of parent nodes. This generally requires that
// we are performing a recursive tree walk using struct fgWalkData
//
//------------------------------------------------------------------------
/* static */ bool Compiler::gtHasCallOnStack(GenTreeStack* parentStack)
{
for (int i = 0; i < parentStack->Height(); i++)
{
GenTree* node = parentStack->Top(i);
if (node->OperGet() == GT_CALL)
{
return true;
}
}
return false;
}
//------------------------------------------------------------------------
// gtGetTypeProducerKind: determine if a tree produces a runtime type, and
// if so, how.
//
// Arguments:
// tree - tree to examine
//
// Return Value:
// TypeProducerKind for the tree.
//
// Notes:
// Checks to see if this tree returns a RuntimeType value, and if so,
// how that value is determined.
//
// Currently handles these cases
// 1) The result of Object::GetType
// 2) The result of typeof(...)
// 3) A null reference
// 4) Tree is otherwise known to have type RuntimeType
//
// The null reference case is surprisingly common because operator
// overloading turns the otherwise innocuous
//
// Type t = ....;
// if (t == null)
//
// into a method call.
Compiler::TypeProducerKind Compiler::gtGetTypeProducerKind(GenTree* tree)
{
if (tree->gtOper == GT_CALL)
{
if (tree->AsCall()->gtCallType == CT_HELPER)
{
if (gtIsTypeHandleToRuntimeTypeHelper(tree->AsCall()))
{
return TPK_Handle;
}
}
else if (tree->AsCall()->gtCallMoreFlags & GTF_CALL_M_SPECIAL_INTRINSIC)
{
if (lookupNamedIntrinsic(tree->AsCall()->gtCallMethHnd) == NI_System_Object_GetType)
{
return TPK_GetType;
}
}
}
else if ((tree->gtOper == GT_INTRINSIC) && (tree->AsIntrinsic()->gtIntrinsicName == NI_System_Object_GetType))
{
return TPK_GetType;
}
else if ((tree->gtOper == GT_CNS_INT) && (tree->AsIntCon()->gtIconVal == 0))
{
return TPK_Null;
}
else
{
bool isExact = false;
bool isNonNull = false;
CORINFO_CLASS_HANDLE clsHnd = gtGetClassHandle(tree, &isExact, &isNonNull);
if (clsHnd != NO_CLASS_HANDLE && clsHnd == info.compCompHnd->getBuiltinClass(CLASSID_RUNTIME_TYPE))
{
return TPK_Other;
}
}
return TPK_Unknown;
}
//------------------------------------------------------------------------
// gtIsTypeHandleToRuntimeTypeHelperCall -- see if tree is constructing
// a RuntimeType from a handle
//
// Arguments:
// tree - tree to examine
//
// Return Value:
// True if so
bool Compiler::gtIsTypeHandleToRuntimeTypeHelper(GenTreeCall* call)
{
return call->gtCallMethHnd == eeFindHelper(CORINFO_HELP_TYPEHANDLE_TO_RUNTIMETYPE) ||
call->gtCallMethHnd == eeFindHelper(CORINFO_HELP_TYPEHANDLE_TO_RUNTIMETYPE_MAYBENULL);
}
//------------------------------------------------------------------------
// gtIsTypeHandleToRuntimeTypeHandleHelperCall -- see if tree is constructing
// a RuntimeTypeHandle from a handle
//
// Arguments:
// tree - tree to examine
// pHelper - optional pointer to a variable that receives the type of the helper
//
// Return Value:
// True if so
bool Compiler::gtIsTypeHandleToRuntimeTypeHandleHelper(GenTreeCall* call, CorInfoHelpFunc* pHelper)
{
CorInfoHelpFunc helper = CORINFO_HELP_UNDEF;
if (call->gtCallMethHnd == eeFindHelper(CORINFO_HELP_TYPEHANDLE_TO_RUNTIMETYPEHANDLE))
{
helper = CORINFO_HELP_TYPEHANDLE_TO_RUNTIMETYPEHANDLE;
}
else if (call->gtCallMethHnd == eeFindHelper(CORINFO_HELP_TYPEHANDLE_TO_RUNTIMETYPEHANDLE_MAYBENULL))
{
helper = CORINFO_HELP_TYPEHANDLE_TO_RUNTIMETYPEHANDLE_MAYBENULL;
}
if (pHelper != nullptr)
{
*pHelper = helper;
}
return helper != CORINFO_HELP_UNDEF;
}
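//------------------------------------------------------------------------
// gtIsActiveCSE_Candidate: Check whether a node is a live CSE candidate.
//
// Return Value:
//    True if we are currently in the value-numbering CSE phase and the node
//    has a CSE index assigned; false otherwise.
//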
bool Compiler::gtIsActiveCSE_Candidate(GenTree* tree)
{
return (optValnumCSE_phase && IS_CSE_INDEX(tree->gtCSEnum));
}
/*****************************************************************************/
struct ComplexityStruct
{
unsigned m_numNodes;
unsigned m_nodeLimit;
ComplexityStruct(unsigned nodeLimit) : m_numNodes(0), m_nodeLimit(nodeLimit)
{
}
};
static Compiler::fgWalkResult ComplexityExceedsWalker(GenTree** pTree, Compiler::fgWalkData* data)
{
ComplexityStruct* pComplexity = (ComplexityStruct*)data->pCallbackData;
if (++pComplexity->m_numNodes > pComplexity->m_nodeLimit)
{
return Compiler::WALK_ABORT;
}
else
{
return Compiler::WALK_CONTINUE;
}
}
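//------------------------------------------------------------------------
// gtComplexityExceeds: Check whether a tree has more than 'limit' nodes.
//
// Arguments:
//    tree  - address of the tree to walk
//    limit - maximum number of nodes allowed
//
// Return Value:
//    True if the walk visits more than 'limit' nodes (the walk aborts as soon
//    as the limit is exceeded); false otherwise.
//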
bool Compiler::gtComplexityExceeds(GenTree** tree, unsigned limit)
{
ComplexityStruct complexity(limit);
if (fgWalkTreePre(tree, &ComplexityExceedsWalker, &complexity) == WALK_ABORT)
{
return true;
}
else
{
return false;
}
}
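// IsPhiNode: Check whether this node is part of a phi: a GT_PHI_ARG, a GT_PHI,
// or a phi definition (see IsPhiDefn).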
bool GenTree::IsPhiNode()
{
return (OperGet() == GT_PHI_ARG) || (OperGet() == GT_PHI) || IsPhiDefn();
}
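// IsPhiDefn: Check whether this node is a phi definition, i.e. a GT_ASG or
// GT_STORE_LCL_VAR whose value operand is a GT_PHI.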
bool GenTree::IsPhiDefn()
{
bool res = ((OperGet() == GT_ASG) && (AsOp()->gtOp2 != nullptr) && (AsOp()->gtOp2->OperGet() == GT_PHI)) ||
((OperGet() == GT_STORE_LCL_VAR) && (AsOp()->gtOp1 != nullptr) && (AsOp()->gtOp1->OperGet() == GT_PHI));
assert(!res || OperGet() == GT_STORE_LCL_VAR || AsOp()->gtOp1->OperGet() == GT_LCL_VAR);
return res;
}
// IsPartialLclFld: Check for a GT_LCL_FLD whose type is a different size than the lclVar.
//
// Arguments:
// comp - the Compiler object.
//
// Return Value:
// Returns "true" iff 'this' is a GT_LCL_FLD or GT_STORE_LCL_FLD on which the type
// is not the same size as the type of the GT_LCL_VAR
bool GenTree::IsPartialLclFld(Compiler* comp)
{
return ((gtOper == GT_LCL_FLD) &&
(comp->lvaTable[this->AsLclVarCommon()->GetLclNum()].lvExactSize != genTypeSize(gtType)));
}
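//------------------------------------------------------------------------
// DefinesLocal: Check whether this tree defines (writes to) a local variable.
//
// Arguments:
//    comp        - the Compiler object
//    pLclVarTree - [out] the local node being defined, if any
//    pIsEntire   - [out] (optional) whether the entire local is written
//
// Return Value:
//    True if this is an assignment or block store whose destination is a
//    local, either directly or through an address that is based on a local;
//    false otherwise.
//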
bool GenTree::DefinesLocal(Compiler* comp, GenTreeLclVarCommon** pLclVarTree, bool* pIsEntire)
{
GenTreeBlk* blkNode = nullptr;
if (OperIs(GT_ASG))
{
if (AsOp()->gtOp1->IsLocal())
{
GenTreeLclVarCommon* lclVarTree = AsOp()->gtOp1->AsLclVarCommon();
*pLclVarTree = lclVarTree;
if (pIsEntire != nullptr)
{
if (lclVarTree->IsPartialLclFld(comp))
{
*pIsEntire = false;
}
else
{
*pIsEntire = true;
}
}
return true;
}
else if (AsOp()->gtOp1->OperGet() == GT_IND)
{
GenTree* indArg = AsOp()->gtOp1->AsOp()->gtOp1;
return indArg->DefinesLocalAddr(comp, genTypeSize(AsOp()->gtOp1->TypeGet()), pLclVarTree, pIsEntire);
}
else if (AsOp()->gtOp1->OperIsBlk())
{
blkNode = AsOp()->gtOp1->AsBlk();
}
}
else if (OperIsBlk())
{
blkNode = this->AsBlk();
}
if (blkNode != nullptr)
{
GenTree* destAddr = blkNode->Addr();
unsigned width = blkNode->Size();
// Do we care about whether this assigns the entire variable?
if (pIsEntire != nullptr && blkNode->OperIs(GT_STORE_DYN_BLK))
{
GenTree* blockWidth = blkNode->AsStoreDynBlk()->gtDynamicSize;
if (blockWidth->IsCnsIntOrI())
{
assert(blockWidth->AsIntConCommon()->FitsInI32());
width = static_cast<unsigned>(blockWidth->AsIntConCommon()->IconValue());
if (width == 0)
{
return false;
}
}
}
return destAddr->DefinesLocalAddr(comp, width, pLclVarTree, pIsEntire);
}
// Otherwise...
return false;
}
// Returns true if this GenTree defines a result which is based on the address of a local.
bool GenTree::DefinesLocalAddr(Compiler* comp, unsigned width, GenTreeLclVarCommon** pLclVarTree, bool* pIsEntire)
{
if (OperGet() == GT_ADDR || OperGet() == GT_LCL_VAR_ADDR)
{
GenTree* addrArg = this;
if (OperGet() == GT_ADDR)
{
addrArg = AsOp()->gtOp1;
}
if (addrArg->IsLocal() || addrArg->OperIsLocalAddr())
{
GenTreeLclVarCommon* addrArgLcl = addrArg->AsLclVarCommon();
*pLclVarTree = addrArgLcl;
if (pIsEntire != nullptr)
{
unsigned lclOffset = addrArgLcl->GetLclOffs();
if (lclOffset != 0)
{
// We aren't updating the bytes at [0..lclOffset-1] so *pIsEntire should be set to false
*pIsEntire = false;
}
else
{
unsigned lclNum = addrArgLcl->GetLclNum();
unsigned varWidth = comp->lvaLclExactSize(lclNum);
if (comp->lvaTable[lclNum].lvNormalizeOnStore())
{
// It's normalize on store, so use the full storage width -- writing to low bytes won't
// necessarily yield a normalized value.
varWidth = genTypeStSz(var_types(comp->lvaTable[lclNum].lvType)) * sizeof(int);
}
*pIsEntire = (varWidth == width);
}
}
return true;
}
else if (addrArg->OperGet() == GT_IND)
{
// A GT_ADDR of a GT_IND can both be optimized away, recurse using the child of the GT_IND
return addrArg->AsOp()->gtOp1->DefinesLocalAddr(comp, width, pLclVarTree, pIsEntire);
}
}
else if (OperGet() == GT_ADD)
{
if (AsOp()->gtOp1->IsCnsIntOrI())
{
// If we are just adding a zero then we allow an IsEntire match against width;
// otherwise we change width to zero to disallow an IsEntire match.
return AsOp()->gtOp2->DefinesLocalAddr(comp, AsOp()->gtOp1->IsIntegralConst(0) ? width : 0, pLclVarTree,
pIsEntire);
}
else if (AsOp()->gtOp2->IsCnsIntOrI())
{
// If we are just adding a zero then we allow an IsEntire match against width;
// otherwise we change width to zero to disallow an IsEntire match.
return AsOp()->gtOp1->DefinesLocalAddr(comp, AsOp()->gtOp2->IsIntegralConst(0) ? width : 0, pLclVarTree,
pIsEntire);
}
}
// Post rationalization we could have GT_IND(GT_LEA(..)) trees.
else if (OperGet() == GT_LEA)
{
// This method gets invoked during liveness computation and therefore it is critical
// that we don't miss 'use' of any local. The below logic is making the assumption
// that in the case of LEA(base, index, offset) only the base can be a GT_LCL_VAR_ADDR,
// not the index.
CLANG_FORMAT_COMMENT_ANCHOR;
#ifdef DEBUG
GenTree* index = AsOp()->gtOp2;
if (index != nullptr)
{
assert(!index->DefinesLocalAddr(comp, width, pLclVarTree, pIsEntire));
}
#endif // DEBUG
// base
GenTree* base = AsOp()->gtOp1;
if (base != nullptr)
{
// Lea could have an Indir as its base.
if (base->OperGet() == GT_IND)
{
base = base->AsOp()->gtOp1->gtEffectiveVal(/*commas only*/ true);
}
return base->DefinesLocalAddr(comp, width, pLclVarTree, pIsEntire);
}
}
// Otherwise...
return false;
}
//------------------------------------------------------------------------
// IsLocalExpr: Determine if this is a LclVarCommon node and return some
// additional info about it in the two out parameters.
//
// Arguments:
// comp - The Compiler instance
// pLclVarTree - An "out" argument that returns the local tree as a
// LclVarCommon, if it is indeed local.
// pFldSeq - An "out" argument that returns the value numbering field
// sequence for the node, if any.
//
// Return Value:
// Returns true, and sets the out arguments accordingly, if this is
// a LclVarCommon node.
bool GenTree::IsLocalExpr(Compiler* comp, GenTreeLclVarCommon** pLclVarTree, FieldSeqNode** pFldSeq)
{
if (IsLocal()) // Note that this covers "GT_LCL_FLD."
{
*pLclVarTree = AsLclVarCommon();
if (OperGet() == GT_LCL_FLD)
{
// Otherwise, prepend this field to whatever we've already accumulated outside in.
*pFldSeq = comp->GetFieldSeqStore()->Append(AsLclFld()->GetFieldSeq(), *pFldSeq);
}
return true;
}
else
{
return false;
}
}
// If this tree evaluates some sum of a local address and some constants,
// return the node for the local being addressed
GenTreeLclVarCommon* GenTree::IsLocalAddrExpr()
{
if (OperGet() == GT_ADDR)
{
return AsOp()->gtOp1->IsLocal() ? AsOp()->gtOp1->AsLclVarCommon() : nullptr;
}
else if (OperIsLocalAddr())
{
return this->AsLclVarCommon();
}
else if (OperGet() == GT_ADD)
{
if (AsOp()->gtOp1->OperGet() == GT_CNS_INT)
{
return AsOp()->gtOp2->IsLocalAddrExpr();
}
else if (AsOp()->gtOp2->OperGet() == GT_CNS_INT)
{
return AsOp()->gtOp1->IsLocalAddrExpr();
}
}
// Otherwise...
return nullptr;
}
//------------------------------------------------------------------------
// IsLocalAddrExpr: finds if "this" is an address of a local var/fld.
//
// Arguments:
// comp - a compiler instance;
// pLclVarTree - [out] sets to the node indicating the local variable if found;
// pFldSeq - [out] sets to the field sequence representing the field, else null;
// pOffset - [out](optional) sets to the sum offset of the lcl/fld if found,
// note it does not include pLclVarTree->GetLclOffs().
//
// Returns:
// Returns true if "this" represents the address of a local, or a field of a local.
//
// Notes:
// It is mostly used for optimizations but assertion propagation depends on it for correctness.
// So if this function does not recognize a def of a LCL_VAR we can have an incorrect optimization.
//
bool GenTree::IsLocalAddrExpr(Compiler* comp,
GenTreeLclVarCommon** pLclVarTree,
FieldSeqNode** pFldSeq,
ssize_t* pOffset /* = nullptr */)
{
if (OperGet() == GT_ADDR)
{
assert(!comp->compRationalIRForm);
GenTree* addrArg = AsOp()->gtOp1;
if (addrArg->IsLocal()) // Note that this covers "GT_LCL_FLD."
{
*pLclVarTree = addrArg->AsLclVarCommon();
if (addrArg->OperGet() == GT_LCL_FLD)
{
// Otherwise, prepend this field to whatever we've already accumulated outside in.
*pFldSeq = comp->GetFieldSeqStore()->Append(addrArg->AsLclFld()->GetFieldSeq(), *pFldSeq);
}
return true;
}
else
{
return false;
}
}
else if (OperIsLocalAddr())
{
*pLclVarTree = this->AsLclVarCommon();
if (this->OperGet() == GT_LCL_FLD_ADDR)
{
*pFldSeq = comp->GetFieldSeqStore()->Append(this->AsLclFld()->GetFieldSeq(), *pFldSeq);
}
return true;
}
else if (OperGet() == GT_ADD)
{
if (AsOp()->gtOp1->OperGet() == GT_CNS_INT)
{
GenTreeIntCon* cnst = AsOp()->gtOp1->AsIntCon();
if (cnst->gtFieldSeq == nullptr)
{
return false;
}
// Otherwise, prepend this field to whatever we've already accumulated outside in.
*pFldSeq = comp->GetFieldSeqStore()->Append(cnst->gtFieldSeq, *pFldSeq);
if (pOffset != nullptr)
{
*pOffset += cnst->IconValue();
}
return AsOp()->gtOp2->IsLocalAddrExpr(comp, pLclVarTree, pFldSeq, pOffset);
}
else if (AsOp()->gtOp2->OperGet() == GT_CNS_INT)
{
GenTreeIntCon* cnst = AsOp()->gtOp2->AsIntCon();
if (cnst->gtFieldSeq == nullptr)
{
return false;
}
// Otherwise, prepend this field to whatever we've already accumulated outside in.
*pFldSeq = comp->GetFieldSeqStore()->Append(cnst->gtFieldSeq, *pFldSeq);
if (pOffset != nullptr)
{
*pOffset += cnst->IconValue();
}
return AsOp()->gtOp1->IsLocalAddrExpr(comp, pLclVarTree, pFldSeq, pOffset);
}
}
// Otherwise...
return false;
}
//------------------------------------------------------------------------
// IsImplicitByrefParameterValue: determine if this tree is the entire
// value of a local implicit byref parameter
//
// Arguments:
// compiler -- compiler instance
//
// Return Value:
// GenTreeLclVar node for the local, or nullptr.
//
GenTreeLclVar* GenTree::IsImplicitByrefParameterValue(Compiler* compiler)
{
#if defined(TARGET_AMD64) || defined(TARGET_ARM64)
GenTreeLclVar* lcl = nullptr;
if (OperIs(GT_LCL_VAR))
{
lcl = AsLclVar();
}
else if (OperIs(GT_OBJ))
{
GenTree* addr = AsIndir()->Addr();
if (addr->OperIs(GT_LCL_VAR))
{
lcl = addr->AsLclVar();
}
else if (addr->OperIs(GT_ADDR))
{
GenTree* base = addr->AsOp()->gtOp1;
if (base->OperIs(GT_LCL_VAR))
{
lcl = base->AsLclVar();
}
}
}
if ((lcl != nullptr) && compiler->lvaIsImplicitByRefLocal(lcl->GetLclNum()))
{
return lcl;
}
#endif // defined(TARGET_AMD64) || defined(TARGET_ARM64)
return nullptr;
}
//------------------------------------------------------------------------
// IsLclVarUpdateTree: Determine whether this is an assignment tree of the
// form Vn = Vn 'oper' 'otherTree' where Vn is a lclVar
//
// Arguments:
// pOtherTree - An "out" argument in which 'otherTree' will be returned.
// pOper - An "out" argument in which 'oper' will be returned.
//
// Return Value:
// If the tree is of the above form, the lclNum of the variable being
// updated is returned, and 'pOtherTree' and 'pOper' are set.
// Otherwise, returns BAD_VAR_NUM.
//
// Notes:
// 'otherTree' can have any shape.
// We avoid worrying about whether the op is commutative by only considering the
// first operand of the rhs. It is expected that most trees of this form will
// already have the lclVar on the lhs.
// TODO-CQ: Evaluate whether there are missed opportunities due to this, or
// whether gtSetEvalOrder will already have put the lclVar on the lhs in
// the cases of interest.
unsigned GenTree::IsLclVarUpdateTree(GenTree** pOtherTree, genTreeOps* pOper)
{
unsigned lclNum = BAD_VAR_NUM;
if (OperIs(GT_ASG))
{
GenTree* lhs = AsOp()->gtOp1;
GenTree* rhs = AsOp()->gtOp2;
if ((lhs->OperGet() == GT_LCL_VAR) && rhs->OperIsBinary())
{
unsigned lhsLclNum = lhs->AsLclVarCommon()->GetLclNum();
GenTree* rhsOp1 = rhs->AsOp()->gtOp1;
GenTree* rhsOp2 = rhs->AsOp()->gtOp2;
// Some operators, such as HWINTRINSIC, are currently declared as binary but
// may not have two operands. We must check that both operands actually exist.
if ((rhsOp1 != nullptr) && (rhsOp2 != nullptr) && (rhsOp1->OperGet() == GT_LCL_VAR) &&
(rhsOp1->AsLclVarCommon()->GetLclNum() == lhsLclNum))
{
lclNum = lhsLclNum;
*pOtherTree = rhsOp2;
*pOper = rhs->OperGet();
}
}
}
return lclNum;
}
#ifdef DEBUG
//------------------------------------------------------------------------
// canBeContained: check whether this tree node may be a subcomponent of its parent for purposes
// of code generation.
//
// Return Value:
// True if it is possible to contain this node and false otherwise.
//
bool GenTree::canBeContained() const
{
assert(OperIsLIR());
if (IsMultiRegLclVar())
{
return false;
}
if (gtHasReg(nullptr))
{
return false;
}
// It is not possible for nodes that do not produce values or that are not containable values to be contained.
if (!IsValue() || ((DebugOperKind() & DBK_NOCONTAIN) != 0) || (OperIsHWIntrinsic() && !isContainableHWIntrinsic()))
{
return false;
}
return true;
}
#endif // DEBUG
//------------------------------------------------------------------------
// isContained: check whether this tree node is a subcomponent of its parent for codegen purposes
//
// Return Value:
// Returns true if there is no code generated explicitly for this node.
// Essentially, it will be rolled into the code generation for the parent.
//
// Assumptions:
// This method relies upon the value of the GTF_CONTAINED flag.
// Therefore this method is only valid after Lowering.
// Also note that register allocation or other subsequent phases may cause
// nodes to become contained (or not) and therefore this property may change.
//
bool GenTree::isContained() const
{
assert(OperIsLIR());
const bool isMarkedContained = ((gtFlags & GTF_CONTAINED) != 0);
#ifdef DEBUG
if (!canBeContained())
{
assert(!isMarkedContained);
}
// these actually produce a register (the flags reg, we just don't model it)
// and are a separate instruction from the branch that consumes the result.
// They can only produce a result if the child is a SIMD equality comparison.
else if (OperIsCompare())
{
assert(isMarkedContained == false);
}
// if it's contained it can't be unused.
if (isMarkedContained)
{
assert(!IsUnusedValue());
}
#endif // DEBUG
return isMarkedContained;
}
// return true if node is contained and an indir
bool GenTree::isContainedIndir() const
{
return OperIsIndir() && isContained();
}
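// isIndirAddrMode: Check whether this is an indirection whose address is a
// contained address mode (GT_LEA) node.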
bool GenTree::isIndirAddrMode()
{
return OperIsIndir() && AsIndir()->Addr()->OperIsAddrMode() && AsIndir()->Addr()->isContained();
}
bool GenTree::isIndir() const
{
return OperGet() == GT_IND || OperGet() == GT_STOREIND;
}
bool GenTreeIndir::HasBase()
{
return Base() != nullptr;
}
bool GenTreeIndir::HasIndex()
{
return Index() != nullptr;
}
GenTree* GenTreeIndir::Base()
{
GenTree* addr = Addr();
if (isIndirAddrMode())
{
GenTree* result = addr->AsAddrMode()->Base();
if (result != nullptr)
{
result = result->gtEffectiveVal();
}
return result;
}
else
{
return addr; // TODO: why do we return 'addr' here, but we return 'nullptr' in the equivalent Index() case?
}
}
GenTree* GenTreeIndir::Index()
{
if (isIndirAddrMode())
{
GenTree* result = Addr()->AsAddrMode()->Index();
if (result != nullptr)
{
result = result->gtEffectiveVal();
}
return result;
}
else
{
return nullptr;
}
}
unsigned GenTreeIndir::Scale()
{
if (HasIndex())
{
return Addr()->AsAddrMode()->gtScale;
}
else
{
return 1;
}
}
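// Offset: Return the constant offset of this indirection's address: the
// address mode offset, the GT_CLS_VAR_ADDR handle value, or the value of a
// contained integer constant address; zero otherwise.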
ssize_t GenTreeIndir::Offset()
{
if (isIndirAddrMode())
{
return Addr()->AsAddrMode()->Offset();
}
else if (Addr()->gtOper == GT_CLS_VAR_ADDR)
{
return static_cast<ssize_t>(reinterpret_cast<intptr_t>(Addr()->AsClsVar()->gtClsVarHnd));
}
else if (Addr()->IsCnsIntOrI() && Addr()->isContained())
{
return Addr()->AsIntConCommon()->IconValue();
}
else
{
return 0;
}
}
//------------------------------------------------------------------------
// GenTreeIntConCommon::ImmedValNeedsReloc: does this immediate value need recording a relocation with the VM?
//
// Arguments:
// comp - Compiler instance
//
// Return Value:
// True if this immediate value requires us to record a relocation for it; false otherwise.
bool GenTreeIntConCommon::ImmedValNeedsReloc(Compiler* comp)
{
return comp->opts.compReloc && (gtOper == GT_CNS_INT) && IsIconHandle();
}
//------------------------------------------------------------------------
// ImmedValCanBeFolded: can this immediate value be folded for op?
//
// Arguments:
// comp - Compiler instance
// op - Tree operator
//
// Return Value:
// True if this immediate value can be folded for op; false otherwise.
bool GenTreeIntConCommon::ImmedValCanBeFolded(Compiler* comp, genTreeOps op)
{
// In general, immediate values that need relocations can't be folded.
// There are cases where we do want to allow folding of handle comparisons
// (e.g., typeof(T) == typeof(int)).
return !ImmedValNeedsReloc(comp) || (op == GT_EQ) || (op == GT_NE);
}
#ifdef TARGET_AMD64
// Returns true if this absolute address fits within the base of an addr mode.
// On Amd64 this effectively means whether an absolute indirect address can
// be encoded as a 32-bit offset relative to IP or zero.
bool GenTreeIntConCommon::FitsInAddrBase(Compiler* comp)
{
#ifdef DEBUG
// Early out if PC-rel encoding of absolute addr is disabled.
if (!comp->opts.compEnablePCRelAddr)
{
return false;
}
#endif
if (comp->opts.compReloc)
{
// During Ngen JIT is always asked to generate relocatable code.
// Hence JIT will try to encode only icon handles as pc-relative offsets.
return IsIconHandle() && (IMAGE_REL_BASED_REL32 == comp->eeGetRelocTypeHint((void*)IconValue()));
}
else
{
// During Jitting, we are allowed to generate non-relocatable code.
// On Amd64 we can encode an absolute indirect addr as an offset relative to zero or RIP.
// An absolute indir addr that can fit within 32 bits can be encoded as an offset relative
// to zero. All other absolute indir addr could be attempted to be encoded as RIP relative
// based on reloc hint provided by VM. RIP relative encoding is preferred over relative
// to zero, because the former is one byte smaller than the latter. For this reason
// we check for reloc hint first and then whether addr fits in 32-bits next.
//
// VM starts off with an initial state to allow both data and code address to be encoded as
// pc-relative offsets. Hence JIT will attempt to encode all absolute addresses as pc-relative
// offsets. It is possible while jitting a method, an address could not be encoded as a
// pc-relative offset. In that case VM will note the overflow and will trigger re-jitting
// of the method with reloc hints turned off for all future methods. Second time around
// jitting will succeed since JIT will not attempt to encode data addresses as pc-relative
// offsets. Note that JIT will always attempt to relocate code addresses (e.g. call addr).
// After an overflow, VM will assume any relocation recorded is for a code address and will
// emit jump thunk if it cannot be encoded as pc-relative offset.
return (IMAGE_REL_BASED_REL32 == comp->eeGetRelocTypeHint((void*)IconValue())) || FitsInI32();
}
}
// Returns true if this icon value, when encoded as an address, needs recording a relocation with the VM
bool GenTreeIntConCommon::AddrNeedsReloc(Compiler* comp)
{
if (comp->opts.compReloc)
{
// During Ngen JIT is always asked to generate relocatable code.
// Hence JIT will try to encode only icon handles as pc-relative offsets.
return IsIconHandle() && (IMAGE_REL_BASED_REL32 == comp->eeGetRelocTypeHint((void*)IconValue()));
}
else
{
return IMAGE_REL_BASED_REL32 == comp->eeGetRelocTypeHint((void*)IconValue());
}
}
#elif defined(TARGET_X86)
// Returns true if this absolute address fits within the base of an addr mode.
// On x86 all addresses are 4-bytes and can be directly encoded in an addr mode.
bool GenTreeIntConCommon::FitsInAddrBase(Compiler* comp)
{
#ifdef DEBUG
// Early out if PC-rel encoding of absolute addr is disabled.
if (!comp->opts.compEnablePCRelAddr)
{
return false;
}
#endif
return IsCnsIntOrI();
}
// Returns true if this icon value, when encoded as an address, needs recording a relocation with the VM
bool GenTreeIntConCommon::AddrNeedsReloc(Compiler* comp)
{
// If generating relocatable code, icons should be reported for recording relocations.
return comp->opts.compReloc && IsIconHandle();
}
#endif // TARGET_X86
//------------------------------------------------------------------------
// IsFieldAddr: Is "this" a static or class field address?
//
// Recognizes the following patterns:
// this: ADD(baseAddr, CONST [FldSeq])
// this: ADD(CONST [FldSeq], baseAddr)
// this: CONST [FldSeq]
// this: Zero [FldSeq]
//
// Arguments:
// comp - the Compiler object
// pBaseAddr - [out] parameter for "the base address"
// pFldSeq - [out] parameter for the field sequence
//
// Return Value:
// If "this" matches patterns denoted above, and the FldSeq found is "full",
// i.e. starts with a class field or a static field, and includes all the
// struct fields that this tree represents the address of, this method will
// return "true" and set "pBaseAddr" to some value, which must be used
// by the caller as the key into the "first field map" to obtain the actual
// value for the field. For instance fields, "base address" will be the object
// reference, for statics - the address to which the field offset with the
// field sequence is added, see "impImportStaticFieldAccess" and "fgMorphField".
//
bool GenTree::IsFieldAddr(Compiler* comp, GenTree** pBaseAddr, FieldSeqNode** pFldSeq)
{
assert(TypeIs(TYP_I_IMPL, TYP_BYREF, TYP_REF));
*pBaseAddr = nullptr;
*pFldSeq = FieldSeqStore::NotAField();
GenTree* baseAddr = nullptr;
FieldSeqNode* fldSeq = FieldSeqStore::NotAField();
if (OperIs(GT_ADD))
{
// If one operand has a field sequence, the other operand must not have one
// as the order of fields in that case would not be well-defined.
if (AsOp()->gtOp1->IsCnsIntOrI() && AsOp()->gtOp1->IsIconHandle())
{
assert(!AsOp()->gtOp2->IsCnsIntOrI() || !AsOp()->gtOp2->IsIconHandle());
fldSeq = AsOp()->gtOp1->AsIntCon()->gtFieldSeq;
baseAddr = AsOp()->gtOp2;
}
else if (AsOp()->gtOp2->IsCnsIntOrI())
{
assert(!AsOp()->gtOp1->IsCnsIntOrI() || !AsOp()->gtOp1->IsIconHandle());
fldSeq = AsOp()->gtOp2->AsIntCon()->gtFieldSeq;
baseAddr = AsOp()->gtOp1;
}
if (baseAddr != nullptr)
{
assert(!baseAddr->TypeIs(TYP_REF) || !comp->GetZeroOffsetFieldMap()->Lookup(baseAddr));
}
}
else if (IsCnsIntOrI() && IsIconHandle(GTF_ICON_STATIC_HDL))
{
assert(!comp->GetZeroOffsetFieldMap()->Lookup(this) && (AsIntCon()->gtFieldSeq != nullptr));
fldSeq = AsIntCon()->gtFieldSeq;
baseAddr = nullptr;
}
else if (comp->GetZeroOffsetFieldMap()->Lookup(this, &fldSeq))
{
baseAddr = this;
}
else
{
return false;
}
assert(fldSeq != nullptr);
if ((fldSeq == FieldSeqStore::NotAField()) || fldSeq->IsPseudoField())
{
return false;
}
// The above screens out obviously invalid cases, but we have more checks to perform. The
// sequence returned from this method *must* start with either a class (NOT struct) field
// or a static field. To avoid the expense of calling "getFieldClass" here, we will instead
// rely on the invariant that TYP_REF base addresses can never appear for struct fields - we
// will effectively treat such cases ("possible" in unsafe code) as undefined behavior.
if (comp->eeIsFieldStatic(fldSeq->GetFieldHandle()))
{
// TODO-VNTypes: this code is out of sync w.r.t. boxed statics that are numbered with
// VNF_PtrToStatic and treated as "simple" while here we treat them as "complex".
// TODO-VNTypes: we will always return the "baseAddr" here for now, but strictly speaking,
// we only need to do that if we have a shared field, to encode the logical "instantiation"
// argument. In all other cases, this serves no purpose and just leads to redundant maps.
*pBaseAddr = baseAddr;
*pFldSeq = fldSeq;
return true;
}
if (baseAddr->TypeIs(TYP_REF))
{
assert(!comp->eeIsValueClass(comp->info.compCompHnd->getFieldClass(fldSeq->GetFieldHandle())));
*pBaseAddr = baseAddr;
*pFldSeq = fldSeq;
return true;
}
// This case is reached, for example, if we have a chain of struct fields that are based on
// some pointer. We do not model such cases because we do not model maps for ByrefExposed
// memory, as it does not have the non-aliasing property of GcHeap and reference types.
return false;
}
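//------------------------------------------------------------------------
// gtIsStaticFieldPtrToBoxedStruct: Check whether a TYP_REF static field node
// actually refers to a boxed value type, i.e. the node is typed TYP_REF while
// the field's declared type is not a reference type.
//
// Arguments:
//    fieldNodeType - type of the field node
//    fldHnd        - handle of the field
//
// Return Value:
//    True if 'fieldNodeType' is TYP_REF but the field's declared type is not.
//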
bool Compiler::gtIsStaticFieldPtrToBoxedStruct(var_types fieldNodeType, CORINFO_FIELD_HANDLE fldHnd)
{
if (fieldNodeType != TYP_REF)
{
return false;
}
noway_assert(fldHnd != nullptr);
CorInfoType cit = info.compCompHnd->getFieldType(fldHnd);
var_types fieldTyp = JITtype2varType(cit);
return fieldTyp != TYP_REF;
}
#ifdef FEATURE_SIMD
//------------------------------------------------------------------------
// gtGetSIMDZero: Get a zero value of the appropriate SIMD type.
//
// Arguments:
// var_types - The simdType
// simdBaseJitType - The SIMD base JIT type we need
// simdHandle - The handle for the SIMD type
//
// Return Value:
// A node generating the appropriate Zero, if we are able to discern it,
// otherwise null (note that this shouldn't happen, but callers should
// be tolerant of this case).
GenTree* Compiler::gtGetSIMDZero(var_types simdType, CorInfoType simdBaseJitType, CORINFO_CLASS_HANDLE simdHandle)
{
bool found = false;
bool isHWSIMD = true;
noway_assert(m_simdHandleCache != nullptr);
// First, determine whether this is Vector<T>.
if (simdType == getSIMDVectorType())
{
switch (simdBaseJitType)
{
case CORINFO_TYPE_FLOAT:
found = (simdHandle == m_simdHandleCache->SIMDFloatHandle);
break;
case CORINFO_TYPE_DOUBLE:
found = (simdHandle == m_simdHandleCache->SIMDDoubleHandle);
break;
case CORINFO_TYPE_INT:
found = (simdHandle == m_simdHandleCache->SIMDIntHandle);
break;
case CORINFO_TYPE_USHORT:
found = (simdHandle == m_simdHandleCache->SIMDUShortHandle);
break;
case CORINFO_TYPE_UBYTE:
found = (simdHandle == m_simdHandleCache->SIMDUByteHandle);
break;
case CORINFO_TYPE_SHORT:
found = (simdHandle == m_simdHandleCache->SIMDShortHandle);
break;
case CORINFO_TYPE_BYTE:
found = (simdHandle == m_simdHandleCache->SIMDByteHandle);
break;
case CORINFO_TYPE_LONG:
found = (simdHandle == m_simdHandleCache->SIMDLongHandle);
break;
case CORINFO_TYPE_UINT:
found = (simdHandle == m_simdHandleCache->SIMDUIntHandle);
break;
case CORINFO_TYPE_ULONG:
found = (simdHandle == m_simdHandleCache->SIMDULongHandle);
break;
case CORINFO_TYPE_NATIVEINT:
found = (simdHandle == m_simdHandleCache->SIMDNIntHandle);
break;
case CORINFO_TYPE_NATIVEUINT:
found = (simdHandle == m_simdHandleCache->SIMDNUIntHandle);
break;
default:
break;
}
if (found)
{
isHWSIMD = false;
}
}
if (!found)
{
// We must still have isHWSIMD set to true, and the only non-HW types left are the fixed types.
switch (simdType)
{
case TYP_SIMD8:
switch (simdBaseJitType)
{
case CORINFO_TYPE_FLOAT:
if (simdHandle == m_simdHandleCache->SIMDVector2Handle)
{
isHWSIMD = false;
}
#if defined(TARGET_ARM64) && defined(FEATURE_HW_INTRINSICS)
else
{
assert(simdHandle == m_simdHandleCache->Vector64FloatHandle);
}
break;
case CORINFO_TYPE_INT:
assert(simdHandle == m_simdHandleCache->Vector64IntHandle);
break;
case CORINFO_TYPE_USHORT:
assert(simdHandle == m_simdHandleCache->Vector64UShortHandle);
break;
case CORINFO_TYPE_UBYTE:
assert(simdHandle == m_simdHandleCache->Vector64UByteHandle);
break;
case CORINFO_TYPE_SHORT:
assert(simdHandle == m_simdHandleCache->Vector64ShortHandle);
break;
case CORINFO_TYPE_BYTE:
assert(simdHandle == m_simdHandleCache->Vector64ByteHandle);
break;
case CORINFO_TYPE_UINT:
assert(simdHandle == m_simdHandleCache->Vector64UIntHandle);
#endif // defined(TARGET_ARM64) && defined(FEATURE_HW_INTRINSICS)
break;
default:
break;
}
break;
case TYP_SIMD12:
assert((simdBaseJitType == CORINFO_TYPE_FLOAT) && (simdHandle == m_simdHandleCache->SIMDVector3Handle));
isHWSIMD = false;
break;
case TYP_SIMD16:
switch (simdBaseJitType)
{
case CORINFO_TYPE_FLOAT:
if (simdHandle == m_simdHandleCache->SIMDVector4Handle)
{
isHWSIMD = false;
}
#if defined(FEATURE_HW_INTRINSICS)
else
{
assert(simdHandle == m_simdHandleCache->Vector128FloatHandle);
}
break;
case CORINFO_TYPE_DOUBLE:
assert(simdHandle == m_simdHandleCache->Vector128DoubleHandle);
break;
case CORINFO_TYPE_INT:
assert(simdHandle == m_simdHandleCache->Vector128IntHandle);
break;
case CORINFO_TYPE_USHORT:
assert(simdHandle == m_simdHandleCache->Vector128UShortHandle);
break;
case CORINFO_TYPE_UBYTE:
assert(simdHandle == m_simdHandleCache->Vector128UByteHandle);
break;
case CORINFO_TYPE_SHORT:
assert(simdHandle == m_simdHandleCache->Vector128ShortHandle);
break;
case CORINFO_TYPE_BYTE:
assert(simdHandle == m_simdHandleCache->Vector128ByteHandle);
break;
case CORINFO_TYPE_LONG:
assert(simdHandle == m_simdHandleCache->Vector128LongHandle);
break;
case CORINFO_TYPE_UINT:
assert(simdHandle == m_simdHandleCache->Vector128UIntHandle);
break;
case CORINFO_TYPE_ULONG:
assert(simdHandle == m_simdHandleCache->Vector128ULongHandle);
break;
case CORINFO_TYPE_NATIVEINT:
assert(simdHandle == m_simdHandleCache->Vector128NIntHandle);
break;
case CORINFO_TYPE_NATIVEUINT:
assert(simdHandle == m_simdHandleCache->Vector128NUIntHandle);
break;
#endif // defined(FEATURE_HW_INTRINSICS)
default:
break;
}
break;
#if defined(TARGET_XARCH) && defined(FEATURE_HW_INTRINSICS)
case TYP_SIMD32:
switch (simdBaseJitType)
{
case CORINFO_TYPE_FLOAT:
assert(simdHandle == m_simdHandleCache->Vector256FloatHandle);
break;
case CORINFO_TYPE_DOUBLE:
assert(simdHandle == m_simdHandleCache->Vector256DoubleHandle);
break;
case CORINFO_TYPE_INT:
assert(simdHandle == m_simdHandleCache->Vector256IntHandle);
break;
case CORINFO_TYPE_USHORT:
assert(simdHandle == m_simdHandleCache->Vector256UShortHandle);
break;
case CORINFO_TYPE_UBYTE:
assert(simdHandle == m_simdHandleCache->Vector256UByteHandle);
break;
case CORINFO_TYPE_SHORT:
assert(simdHandle == m_simdHandleCache->Vector256ShortHandle);
break;
case CORINFO_TYPE_BYTE:
assert(simdHandle == m_simdHandleCache->Vector256ByteHandle);
break;
case CORINFO_TYPE_LONG:
assert(simdHandle == m_simdHandleCache->Vector256LongHandle);
break;
case CORINFO_TYPE_UINT:
assert(simdHandle == m_simdHandleCache->Vector256UIntHandle);
break;
case CORINFO_TYPE_ULONG:
assert(simdHandle == m_simdHandleCache->Vector256ULongHandle);
break;
case CORINFO_TYPE_NATIVEINT:
assert(simdHandle == m_simdHandleCache->Vector256NIntHandle);
break;
case CORINFO_TYPE_NATIVEUINT:
assert(simdHandle == m_simdHandleCache->Vector256NUIntHandle);
break;
default:
break;
}
break;
#endif // TARGET_XARCH && FEATURE_HW_INTRINSICS
default:
break;
}
}
unsigned size = genTypeSize(simdType);
if (isHWSIMD)
{
#if defined(FEATURE_HW_INTRINSICS)
return gtNewSimdZeroNode(simdType, simdBaseJitType, size, /* isSimdAsHWIntrinsic */ false);
#else
        JITDUMP("Couldn't find the matching SIMD type for %s<%s> in gtGetSIMDZero\n", varTypeName(simdType),
varTypeName(JitType2PreciseVarType(simdBaseJitType)));
return nullptr;
#endif // FEATURE_HW_INTRINSICS
}
else
{
return gtNewSIMDVectorZero(simdType, simdBaseJitType, size);
}
}
#endif // FEATURE_SIMD
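//------------------------------------------------------------------------
// gtGetStructHandleIfPresent: find the struct class handle for a tree, if available
//
// Arguments:
//    tree - tree to find the handle for
//
// Return Value:
//    The struct class handle describing the tree's type, or NO_CLASS_HANDLE
//    if one cannot be determined from the tree's shape.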
CORINFO_CLASS_HANDLE Compiler::gtGetStructHandleIfPresent(GenTree* tree)
{
CORINFO_CLASS_HANDLE structHnd = NO_CLASS_HANDLE;
tree = tree->gtEffectiveVal();
if (varTypeIsStruct(tree->gtType))
{
switch (tree->gtOper)
{
default:
break;
case GT_MKREFANY:
structHnd = impGetRefAnyClass();
break;
case GT_OBJ:
structHnd = tree->AsObj()->GetLayout()->GetClassHandle();
break;
case GT_BLK:
structHnd = tree->AsBlk()->GetLayout()->GetClassHandle();
break;
case GT_CALL:
structHnd = tree->AsCall()->gtRetClsHnd;
break;
case GT_RET_EXPR:
structHnd = tree->AsRetExpr()->gtRetClsHnd;
break;
case GT_ARGPLACE:
structHnd = tree->AsArgPlace()->gtArgPlaceClsHnd;
break;
case GT_INDEX:
structHnd = tree->AsIndex()->gtStructElemClass;
break;
case GT_FIELD:
info.compCompHnd->getFieldType(tree->AsField()->gtFldHnd, &structHnd);
break;
case GT_ASG:
structHnd = gtGetStructHandleIfPresent(tree->gtGetOp1());
break;
case GT_LCL_FLD:
#ifdef FEATURE_SIMD
if (varTypeIsSIMD(tree))
{
structHnd = gtGetStructHandleForSIMD(tree->gtType, CORINFO_TYPE_FLOAT);
#ifdef FEATURE_HW_INTRINSICS
if (structHnd == NO_CLASS_HANDLE)
{
structHnd = gtGetStructHandleForHWSIMD(tree->gtType, CORINFO_TYPE_FLOAT);
}
#endif
}
#endif
break;
case GT_LCL_VAR:
{
unsigned lclNum = tree->AsLclVarCommon()->GetLclNum();
structHnd = lvaGetStruct(lclNum);
break;
}
case GT_RETURN:
structHnd = gtGetStructHandleIfPresent(tree->AsOp()->gtOp1);
break;
case GT_IND:
#ifdef FEATURE_SIMD
if (varTypeIsSIMD(tree))
{
structHnd = gtGetStructHandleForSIMD(tree->gtType, CORINFO_TYPE_FLOAT);
#ifdef FEATURE_HW_INTRINSICS
if (structHnd == NO_CLASS_HANDLE)
{
structHnd = gtGetStructHandleForHWSIMD(tree->gtType, CORINFO_TYPE_FLOAT);
}
#endif
}
else
#endif
{
// Attempt to find a handle for this expression.
// We can do this for an array element indirection, or for a field indirection.
ArrayInfo arrInfo;
if (TryGetArrayInfo(tree->AsIndir(), &arrInfo))
{
structHnd = arrInfo.m_elemStructType;
}
else
{
GenTree* addr = tree->AsIndir()->Addr();
FieldSeqNode* fieldSeq = nullptr;
if ((addr->OperGet() == GT_ADD) && addr->gtGetOp2()->OperIs(GT_CNS_INT))
{
fieldSeq = addr->gtGetOp2()->AsIntCon()->gtFieldSeq;
}
else
{
GetZeroOffsetFieldMap()->Lookup(addr, &fieldSeq);
}
if (fieldSeq != nullptr)
{
while (fieldSeq->m_next != nullptr)
{
fieldSeq = fieldSeq->m_next;
}
if (fieldSeq != FieldSeqStore::NotAField() && !fieldSeq->IsPseudoField())
{
CORINFO_FIELD_HANDLE fieldHnd = fieldSeq->m_fieldHnd;
CorInfoType fieldCorType = info.compCompHnd->getFieldType(fieldHnd, &structHnd);
// With unsafe code and type casts
// this can return a primitive type and have nullptr for structHnd
// see runtime/issues/38541
}
}
}
}
break;
#ifdef FEATURE_SIMD
case GT_SIMD:
structHnd = gtGetStructHandleForSIMD(tree->gtType, tree->AsSIMD()->GetSimdBaseJitType());
break;
#endif // FEATURE_SIMD
#ifdef FEATURE_HW_INTRINSICS
case GT_HWINTRINSIC:
if ((tree->gtFlags & GTF_SIMDASHW_OP) != 0)
{
structHnd = gtGetStructHandleForSIMD(tree->gtType, tree->AsHWIntrinsic()->GetSimdBaseJitType());
}
else
{
structHnd = gtGetStructHandleForHWSIMD(tree->gtType, tree->AsHWIntrinsic()->GetSimdBaseJitType());
}
break;
#endif
break;
}
        // TODO-1stClassStructs: add a check that `structHnd != NO_CLASS_HANDLE`;
        // nowadays it won't work because the right-hand side of an ASG could have a struct type
        // without a handle (see `fgMorphBlockOperand(isBlkReqd)` and a few other cases).
}
return structHnd;
}
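//------------------------------------------------------------------------
// gtGetStructHandle: find the struct class handle for a tree
//
// Arguments:
//    tree - tree to find the handle for
//
// Return Value:
//    The struct class handle; asserts that a handle was found.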
CORINFO_CLASS_HANDLE Compiler::gtGetStructHandle(GenTree* tree)
{
CORINFO_CLASS_HANDLE structHnd = gtGetStructHandleIfPresent(tree);
assert(structHnd != NO_CLASS_HANDLE);
return structHnd;
}
//------------------------------------------------------------------------
// gtGetClassHandle: find class handle for a ref type
//
// Arguments:
// tree -- tree to find handle for
// pIsExact [out] -- whether handle is exact type
// pIsNonNull [out] -- whether tree value is known not to be null
//
// Return Value:
// nullptr if class handle is unknown,
// otherwise the class handle.
// *pIsExact set true if tree type is known to be exactly the handle type,
// otherwise actual type may be a subtype.
// *pIsNonNull set true if tree value is known not to be null,
// otherwise a null value is possible.
CORINFO_CLASS_HANDLE Compiler::gtGetClassHandle(GenTree* tree, bool* pIsExact, bool* pIsNonNull)
{
// Set default values for our out params.
*pIsNonNull = false;
*pIsExact = false;
CORINFO_CLASS_HANDLE objClass = nullptr;
// Bail out if we're just importing and not generating code, since
// the jit uses TYP_REF for CORINFO_TYPE_VAR locals and args, but
// these may not be ref types.
if (compIsForImportOnly())
{
return objClass;
}
// Bail out if the tree is not a ref type.
var_types treeType = tree->TypeGet();
if (treeType != TYP_REF)
{
return objClass;
}
// Tunnel through commas.
GenTree* obj = tree->gtEffectiveVal(false);
const genTreeOps objOp = obj->OperGet();
switch (objOp)
{
case GT_COMMA:
{
// gtEffectiveVal above means we shouldn't see commas here.
assert(!"unexpected GT_COMMA");
break;
}
case GT_LCL_VAR:
{
// For locals, pick up type info from the local table.
const unsigned objLcl = obj->AsLclVar()->GetLclNum();
objClass = lvaTable[objLcl].lvClassHnd;
*pIsExact = lvaTable[objLcl].lvClassIsExact;
break;
}
case GT_FIELD:
{
// For fields, get the type from the field handle.
CORINFO_FIELD_HANDLE fieldHnd = obj->AsField()->gtFldHnd;
if (fieldHnd != nullptr)
{
objClass = gtGetFieldClassHandle(fieldHnd, pIsExact, pIsNonNull);
}
break;
}
case GT_RET_EXPR:
{
// If we see a RET_EXPR, recurse through to examine the
// return value expression.
GenTree* retExpr = tree->AsRetExpr()->gtInlineCandidate;
objClass = gtGetClassHandle(retExpr, pIsExact, pIsNonNull);
break;
}
case GT_CALL:
{
GenTreeCall* call = tree->AsCall();
if (call->gtCallMoreFlags & GTF_CALL_M_SPECIAL_INTRINSIC)
{
NamedIntrinsic ni = lookupNamedIntrinsic(call->gtCallMethHnd);
if ((ni == NI_System_Array_Clone) || (ni == NI_System_Object_MemberwiseClone))
{
objClass = gtGetClassHandle(call->gtCallThisArg->GetNode(), pIsExact, pIsNonNull);
break;
}
CORINFO_CLASS_HANDLE specialObjClass = impGetSpecialIntrinsicExactReturnType(call->gtCallMethHnd);
if (specialObjClass != nullptr)
{
objClass = specialObjClass;
*pIsExact = true;
*pIsNonNull = true;
break;
}
}
if (call->IsInlineCandidate())
{
// For inline candidates, we've already cached the return
// type class handle in the inline info.
InlineCandidateInfo* inlInfo = call->gtInlineCandidateInfo;
assert(inlInfo != nullptr);
// Grab it as our first cut at a return type.
assert(inlInfo->methInfo.args.retType == CORINFO_TYPE_CLASS);
objClass = inlInfo->methInfo.args.retTypeClass;
// If the method is shared, the above may not capture
// the most precise return type information (that is,
// it may represent a shared return type and as such,
// have instances of __Canon). See if we can use the
// context to get at something more definite.
//
// For now, we do this here on demand rather than when
// processing the call, but we could/should apply
// similar sharpening to the argument and local types
// of the inlinee.
const unsigned retClassFlags = info.compCompHnd->getClassAttribs(objClass);
if (retClassFlags & CORINFO_FLG_SHAREDINST)
{
CORINFO_CONTEXT_HANDLE context = inlInfo->exactContextHnd;
if (context != nullptr)
{
CORINFO_CLASS_HANDLE exactClass = eeGetClassFromContext(context);
// Grab the signature in this context.
CORINFO_SIG_INFO sig;
eeGetMethodSig(call->gtCallMethHnd, &sig, exactClass);
assert(sig.retType == CORINFO_TYPE_CLASS);
objClass = sig.retTypeClass;
}
}
}
else if (call->gtCallType == CT_USER_FUNC)
{
// For user calls, we can fetch the approximate return
// type info from the method handle. Unfortunately
// we've lost the exact context, so this is the best
// we can do for now.
CORINFO_METHOD_HANDLE method = call->gtCallMethHnd;
CORINFO_CLASS_HANDLE exactClass = nullptr;
CORINFO_SIG_INFO sig;
eeGetMethodSig(method, &sig, exactClass);
if (sig.retType == CORINFO_TYPE_VOID)
{
// This is a constructor call.
const unsigned methodFlags = info.compCompHnd->getMethodAttribs(method);
assert((methodFlags & CORINFO_FLG_CONSTRUCTOR) != 0);
objClass = info.compCompHnd->getMethodClass(method);
*pIsExact = true;
*pIsNonNull = true;
}
else
{
assert(sig.retType == CORINFO_TYPE_CLASS);
objClass = sig.retTypeClass;
}
}
else if (call->gtCallType == CT_HELPER)
{
objClass = gtGetHelperCallClassHandle(call, pIsExact, pIsNonNull);
}
break;
}
case GT_INTRINSIC:
{
GenTreeIntrinsic* intrinsic = obj->AsIntrinsic();
if (intrinsic->gtIntrinsicName == NI_System_Object_GetType)
{
CORINFO_CLASS_HANDLE runtimeType = info.compCompHnd->getBuiltinClass(CLASSID_RUNTIME_TYPE);
assert(runtimeType != NO_CLASS_HANDLE);
objClass = runtimeType;
*pIsExact = false;
*pIsNonNull = true;
}
break;
}
case GT_CNS_STR:
{
// For literal strings, we know the class and that the
// value is not null.
objClass = impGetStringClass();
*pIsExact = true;
*pIsNonNull = true;
break;
}
case GT_IND:
{
GenTreeIndir* indir = obj->AsIndir();
if (indir->HasBase() && !indir->HasIndex())
{
// indir(addr(lcl)) --> lcl
//
// This comes up during constrained callvirt on ref types.
GenTree* base = indir->Base();
GenTreeLclVarCommon* lcl = base->IsLocalAddrExpr();
if ((lcl != nullptr) && (base->OperGet() != GT_ADD))
{
const unsigned objLcl = lcl->GetLclNum();
objClass = lvaTable[objLcl].lvClassHnd;
*pIsExact = lvaTable[objLcl].lvClassIsExact;
}
else if (base->OperGet() == GT_ARR_ELEM)
{
// indir(arr_elem(...)) -> array element type
GenTree* array = base->AsArrElem()->gtArrObj;
objClass = gtGetArrayElementClassHandle(array);
*pIsExact = false;
*pIsNonNull = false;
}
else if (base->OperGet() == GT_ADD)
{
// This could be a static field access.
//
// See if op1 is a static field base helper call
// and if so, op2 will have the field info.
GenTree* op1 = base->AsOp()->gtOp1;
GenTree* op2 = base->AsOp()->gtOp2;
const bool op1IsStaticFieldBase = gtIsStaticGCBaseHelperCall(op1);
if (op1IsStaticFieldBase && (op2->OperGet() == GT_CNS_INT))
{
FieldSeqNode* fieldSeq = op2->AsIntCon()->gtFieldSeq;
if (fieldSeq != nullptr)
{
while (fieldSeq->m_next != nullptr)
{
fieldSeq = fieldSeq->m_next;
}
assert(!fieldSeq->IsPseudoField());
// No benefit to calling gtGetFieldClassHandle here, as
// the exact field being accessed can vary.
CORINFO_FIELD_HANDLE fieldHnd = fieldSeq->m_fieldHnd;
CORINFO_CLASS_HANDLE fieldClass = nullptr;
CorInfoType fieldCorType = info.compCompHnd->getFieldType(fieldHnd, &fieldClass);
assert(fieldCorType == CORINFO_TYPE_CLASS);
objClass = fieldClass;
}
}
}
}
break;
}
case GT_BOX:
{
// Box should just wrap a local var reference which has
// the type we're looking for. Also box only represents a
// non-nullable value type so result cannot be null.
GenTreeBox* box = obj->AsBox();
GenTree* boxTemp = box->BoxOp();
assert(boxTemp->IsLocal());
const unsigned boxTempLcl = boxTemp->AsLclVar()->GetLclNum();
objClass = lvaTable[boxTempLcl].lvClassHnd;
*pIsExact = lvaTable[boxTempLcl].lvClassIsExact;
*pIsNonNull = true;
break;
}
case GT_INDEX:
{
GenTree* array = obj->AsIndex()->Arr();
objClass = gtGetArrayElementClassHandle(array);
*pIsExact = false;
*pIsNonNull = false;
break;
}
default:
{
break;
}
}
return objClass;
}
//------------------------------------------------------------------------
// gtGetHelperCallClassHandle: find class handle for return value of a
// helper call
//
// Arguments:
// call - helper call to examine
// pIsExact - [OUT] true if type is known exactly
// pIsNonNull - [OUT] true if return value is not null
//
// Return Value:
// nullptr if helper call result is not a ref class, or the class handle
// is unknown, otherwise the class handle.
CORINFO_CLASS_HANDLE Compiler::gtGetHelperCallClassHandle(GenTreeCall* call, bool* pIsExact, bool* pIsNonNull)
{
assert(call->gtCallType == CT_HELPER);
*pIsNonNull = false;
*pIsExact = false;
CORINFO_CLASS_HANDLE objClass = nullptr;
const CorInfoHelpFunc helper = eeGetHelperNum(call->gtCallMethHnd);
switch (helper)
{
case CORINFO_HELP_TYPEHANDLE_TO_RUNTIMETYPE:
case CORINFO_HELP_TYPEHANDLE_TO_RUNTIMETYPE_MAYBENULL:
{
// Note for some runtimes these helpers return exact types.
//
// But in those cases the types are also sealed, so there's no
// need to claim exactness here.
const bool helperResultNonNull = (helper == CORINFO_HELP_TYPEHANDLE_TO_RUNTIMETYPE);
CORINFO_CLASS_HANDLE runtimeType = info.compCompHnd->getBuiltinClass(CLASSID_RUNTIME_TYPE);
assert(runtimeType != NO_CLASS_HANDLE);
objClass = runtimeType;
*pIsNonNull = helperResultNonNull;
break;
}
case CORINFO_HELP_CHKCASTCLASS:
case CORINFO_HELP_CHKCASTANY:
case CORINFO_HELP_CHKCASTARRAY:
case CORINFO_HELP_CHKCASTINTERFACE:
case CORINFO_HELP_CHKCASTCLASS_SPECIAL:
case CORINFO_HELP_ISINSTANCEOFINTERFACE:
case CORINFO_HELP_ISINSTANCEOFARRAY:
case CORINFO_HELP_ISINSTANCEOFCLASS:
case CORINFO_HELP_ISINSTANCEOFANY:
{
// Fetch the class handle from the helper call arglist
GenTreeCall::Use* args = call->gtCallArgs;
GenTree* typeArg = args->GetNode();
CORINFO_CLASS_HANDLE castHnd = gtGetHelperArgClassHandle(typeArg);
// We generally assume the type being cast to is the best type
// for the result, unless it is an interface type.
//
// TODO-CQ: when we have default interface methods then
// this might not be the best assumption. We could also
// explore calling something like mergeClasses to identify
// the more specific class. A similar issue arises when
// typing the temp in impCastClassOrIsInstToTree, when we
// expand the cast inline.
if (castHnd != nullptr)
{
DWORD attrs = info.compCompHnd->getClassAttribs(castHnd);
if ((attrs & CORINFO_FLG_INTERFACE) != 0)
{
castHnd = nullptr;
}
}
// If we don't have a good estimate for the type we can use the
// type from the value being cast instead.
if (castHnd == nullptr)
{
GenTree* valueArg = args->GetNext()->GetNode();
castHnd = gtGetClassHandle(valueArg, pIsExact, pIsNonNull);
}
// We don't know at jit time if the cast will succeed or fail, but if it
// fails at runtime then an exception is thrown for cast helpers, or the
// result is set null for instance helpers.
//
            // So it is safe to claim the result has the cast type.
// Note we don't know for sure that it is exactly this type.
if (castHnd != nullptr)
{
objClass = castHnd;
}
break;
}
case CORINFO_HELP_NEWARR_1_DIRECT:
case CORINFO_HELP_NEWARR_1_OBJ:
case CORINFO_HELP_NEWARR_1_VC:
case CORINFO_HELP_NEWARR_1_ALIGN8:
case CORINFO_HELP_READYTORUN_NEWARR_1:
{
CORINFO_CLASS_HANDLE arrayHnd = (CORINFO_CLASS_HANDLE)call->compileTimeHelperArgumentHandle;
if (arrayHnd != NO_CLASS_HANDLE)
{
objClass = arrayHnd;
*pIsExact = true;
*pIsNonNull = true;
}
break;
}
default:
break;
}
return objClass;
}
//------------------------------------------------------------------------
// gtGetArrayElementClassHandle: find class handle for elements of an array
// of ref types
//
// Arguments:
// array -- array to find handle for
//
// Return Value:
// nullptr if element class handle is unknown, otherwise the class handle.
CORINFO_CLASS_HANDLE Compiler::gtGetArrayElementClassHandle(GenTree* array)
{
bool isArrayExact = false;
bool isArrayNonNull = false;
CORINFO_CLASS_HANDLE arrayClassHnd = gtGetClassHandle(array, &isArrayExact, &isArrayNonNull);
if (arrayClassHnd != nullptr)
{
// We know the class of the reference
DWORD attribs = info.compCompHnd->getClassAttribs(arrayClassHnd);
if ((attribs & CORINFO_FLG_ARRAY) != 0)
{
// We know for sure it is an array
CORINFO_CLASS_HANDLE elemClassHnd = nullptr;
CorInfoType arrayElemType = info.compCompHnd->getChildType(arrayClassHnd, &elemClassHnd);
if (arrayElemType == CORINFO_TYPE_CLASS)
{
// We know it is an array of ref types
return elemClassHnd;
}
}
}
return nullptr;
}
//------------------------------------------------------------------------
// gtGetFieldClassHandle: find class handle for a field
//
// Arguments:
// fieldHnd - field handle for field in question
// pIsExact - [OUT] true if type is known exactly
// pIsNonNull - [OUT] true if field value is not null
//
// Return Value:
// nullptr if helper call result is not a ref class, or the class handle
// is unknown, otherwise the class handle.
//
// May examine runtime state of static field instances.
CORINFO_CLASS_HANDLE Compiler::gtGetFieldClassHandle(CORINFO_FIELD_HANDLE fieldHnd, bool* pIsExact, bool* pIsNonNull)
{
CORINFO_CLASS_HANDLE fieldClass = nullptr;
CorInfoType fieldCorType = info.compCompHnd->getFieldType(fieldHnd, &fieldClass);
if (fieldCorType == CORINFO_TYPE_CLASS)
{
// Optionally, look at the actual type of the field's value
bool queryForCurrentClass = true;
INDEBUG(queryForCurrentClass = (JitConfig.JitQueryCurrentStaticFieldClass() > 0););
if (queryForCurrentClass)
{
#if DEBUG
const char* fieldClassName = nullptr;
const char* fieldName = eeGetFieldName(fieldHnd, &fieldClassName);
JITDUMP("Querying runtime about current class of field %s.%s (declared as %s)\n", fieldClassName, fieldName,
eeGetClassName(fieldClass));
#endif // DEBUG
// Is this a fully initialized init-only static field?
//
// Note we're not asking for speculative results here, yet.
CORINFO_CLASS_HANDLE currentClass = info.compCompHnd->getStaticFieldCurrentClass(fieldHnd);
if (currentClass != NO_CLASS_HANDLE)
{
// Yes! We know the class exactly and can rely on this to always be true.
fieldClass = currentClass;
*pIsExact = true;
*pIsNonNull = true;
JITDUMP("Runtime reports field is init-only and initialized and has class %s\n",
eeGetClassName(fieldClass));
}
else
{
JITDUMP("Field's current class not available\n");
}
}
}
return fieldClass;
}
//------------------------------------------------------------------------
// gtIsStaticGCBaseHelperCall: true if tree is fetching the gc static base
// for a subsequent static field access
//
// Arguments:
// tree - tree to consider
//
// Return Value:
// true if the tree is a suitable helper call
//
// Notes:
// Excludes R2R helpers as they specify the target field in a way
// that is opaque to the jit.
bool Compiler::gtIsStaticGCBaseHelperCall(GenTree* tree)
{
if (tree->OperGet() != GT_CALL)
{
return false;
}
GenTreeCall* call = tree->AsCall();
if (call->gtCallType != CT_HELPER)
{
return false;
}
const CorInfoHelpFunc helper = eeGetHelperNum(call->gtCallMethHnd);
switch (helper)
{
// We are looking for a REF type so only need to check for the GC base helpers
case CORINFO_HELP_GETGENERICS_GCSTATIC_BASE:
case CORINFO_HELP_GETSHARED_GCSTATIC_BASE:
case CORINFO_HELP_GETSHARED_GCSTATIC_BASE_NOCTOR:
case CORINFO_HELP_GETSHARED_GCSTATIC_BASE_DYNAMICCLASS:
case CORINFO_HELP_GETGENERICS_GCTHREADSTATIC_BASE:
case CORINFO_HELP_GETSHARED_GCTHREADSTATIC_BASE:
case CORINFO_HELP_GETSHARED_GCTHREADSTATIC_BASE_NOCTOR:
case CORINFO_HELP_GETSHARED_GCTHREADSTATIC_BASE_DYNAMICCLASS:
return true;
default:
break;
}
return false;
}
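//------------------------------------------------------------------------
// ParseArrayAddress: decompose this tree, assumed to compute an array element address,
//    into its constituent parts
//
// Arguments:
//    comp      - compiler instance
//    arrayInfo - element size and first-element offset of the array access
//    pArr      - [out] tree representing the array object reference
//    pInxVN    - [out] value number of the element index
//    pFldSeq   - [out] sequence of struct fields accessed within the element, if any
//
// Notes:
//    Walks the address expression accumulating a constant byte offset, a non-constant index
//    contribution, and a field sequence, then converts the remaining constant offset into an
//    element index by dividing by the element size.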
void GenTree::ParseArrayAddress(
Compiler* comp, ArrayInfo* arrayInfo, GenTree** pArr, ValueNum* pInxVN, FieldSeqNode** pFldSeq)
{
*pArr = nullptr;
ValueNum inxVN = ValueNumStore::NoVN;
target_ssize_t offset = 0;
FieldSeqNode* fldSeq = nullptr;
ParseArrayAddressWork(comp, 1, pArr, &inxVN, &offset, &fldSeq);
// If we didn't find an array reference (perhaps it is the constant null?) we will give up.
if (*pArr == nullptr)
{
return;
}
    // OK, now we have to figure out if any part of the "offset" is a constant contribution to the index.
// First, sum the offsets of any fields in fldSeq.
unsigned fieldOffsets = 0;
FieldSeqNode* fldSeqIter = fldSeq;
// Also, find the first non-pseudo field...
assert(*pFldSeq == nullptr);
while (fldSeqIter != nullptr)
{
if (fldSeqIter == FieldSeqStore::NotAField())
{
// TODO-Review: A NotAField here indicates a failure to properly maintain the field sequence
            // See test case self_host_tests_x86\jit\regression\CLR-x86-JIT\v1-m12-beta2\b70992\b70992.exe
// Safest thing to do here is to drop back to MinOpts
CLANG_FORMAT_COMMENT_ANCHOR;
#ifdef DEBUG
if (comp->opts.optRepeat)
{
// We don't guarantee preserving these annotations through the entire optimizer, so
// just conservatively return null if under optRepeat.
*pArr = nullptr;
return;
}
#endif // DEBUG
noway_assert(!"fldSeqIter is NotAField() in ParseArrayAddress");
}
if (!FieldSeqStore::IsPseudoField(fldSeqIter->m_fieldHnd))
{
if (*pFldSeq == nullptr)
{
*pFldSeq = fldSeqIter;
}
CORINFO_CLASS_HANDLE fldCls = nullptr;
noway_assert(fldSeqIter->m_fieldHnd != nullptr);
CorInfoType cit = comp->info.compCompHnd->getFieldType(fldSeqIter->m_fieldHnd, &fldCls);
fieldOffsets += comp->compGetTypeSize(cit, fldCls);
}
fldSeqIter = fldSeqIter->m_next;
}
// Is there some portion of the "offset" beyond the first-elem offset and the struct field suffix we just computed?
if (!FitsIn<target_ssize_t>(fieldOffsets + arrayInfo->m_elemOffset) ||
!FitsIn<target_ssize_t>(arrayInfo->m_elemSize))
{
// This seems unlikely, but no harm in being safe...
*pInxVN = comp->GetValueNumStore()->VNForExpr(nullptr, TYP_INT);
return;
}
// Otherwise...
target_ssize_t offsetAccountedFor = static_cast<target_ssize_t>(fieldOffsets + arrayInfo->m_elemOffset);
target_ssize_t elemSize = static_cast<target_ssize_t>(arrayInfo->m_elemSize);
target_ssize_t constIndOffset = offset - offsetAccountedFor;
// This should be divisible by the element size...
assert((constIndOffset % elemSize) == 0);
target_ssize_t constInd = constIndOffset / elemSize;
ValueNumStore* vnStore = comp->GetValueNumStore();
if (inxVN == ValueNumStore::NoVN)
{
// Must be a constant index.
*pInxVN = vnStore->VNForPtrSizeIntCon(constInd);
}
else
{
//
// Perform ((inxVN / elemSizeVN) + vnForConstInd)
//
// The value associated with the index value number (inxVN) is the offset into the array,
// which has been scaled by element size. We need to recover the array index from that offset
if (vnStore->IsVNConstant(inxVN))
{
target_ssize_t index = vnStore->CoercedConstantValue<target_ssize_t>(inxVN);
noway_assert(elemSize > 0 && ((index % elemSize) == 0));
*pInxVN = vnStore->VNForPtrSizeIntCon((index / elemSize) + constInd);
}
else
{
bool canFoldDiv = false;
// If the index VN is a MUL by elemSize, see if we can eliminate it instead of adding
// the division by elemSize.
VNFuncApp funcApp;
if (vnStore->GetVNFunc(inxVN, &funcApp) && funcApp.m_func == (VNFunc)GT_MUL)
{
ValueNum vnForElemSize = vnStore->VNForLongCon(elemSize);
                // One of the multiply operands is elemSize, so the resulting
                // index VN should simply be the other operand.
if (funcApp.m_args[1] == vnForElemSize)
{
*pInxVN = funcApp.m_args[0];
canFoldDiv = true;
}
else if (funcApp.m_args[0] == vnForElemSize)
{
*pInxVN = funcApp.m_args[1];
canFoldDiv = true;
}
}
// Perform ((inxVN / elemSizeVN) + vnForConstInd)
if (!canFoldDiv)
{
ValueNum vnForElemSize = vnStore->VNForPtrSizeIntCon(elemSize);
ValueNum vnForScaledInx = vnStore->VNForFunc(TYP_I_IMPL, VNFunc(GT_DIV), inxVN, vnForElemSize);
*pInxVN = vnForScaledInx;
}
if (constInd != 0)
{
ValueNum vnForConstInd = comp->GetValueNumStore()->VNForPtrSizeIntCon(constInd);
VNFunc vnFunc = VNFunc(GT_ADD);
*pInxVN = comp->GetValueNumStore()->VNForFunc(TYP_I_IMPL, vnFunc, *pInxVN, vnForConstInd);
}
}
}
}
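//------------------------------------------------------------------------
// ParseArrayAddressWork: recursive worker for ParseArrayAddress
//
// Arguments:
//    comp     - compiler instance
//    inputMul - multiplier applied to this subtree's contribution to the index
//    pArr     - [out] tree representing the array object reference (the TYP_REF node)
//    pInxVN   - [in, out] value number of the non-constant part of the index
//    pOffset  - [in, out] accumulated constant byte offset
//    pFldSeq  - [in, out] accumulated field sequence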
void GenTree::ParseArrayAddressWork(Compiler* comp,
target_ssize_t inputMul,
GenTree** pArr,
ValueNum* pInxVN,
target_ssize_t* pOffset,
FieldSeqNode** pFldSeq)
{
if (TypeGet() == TYP_REF)
{
// This must be the array pointer.
*pArr = this;
assert(inputMul == 1); // Can't multiply the array pointer by anything.
}
else
{
switch (OperGet())
{
case GT_CNS_INT:
*pFldSeq = comp->GetFieldSeqStore()->Append(*pFldSeq, AsIntCon()->gtFieldSeq);
assert(!AsIntCon()->ImmedValNeedsReloc(comp));
// TODO-CrossBitness: we wouldn't need the cast below if GenTreeIntCon::gtIconVal had target_ssize_t
// type.
*pOffset += (inputMul * (target_ssize_t)(AsIntCon()->gtIconVal));
return;
case GT_ADD:
case GT_SUB:
AsOp()->gtOp1->ParseArrayAddressWork(comp, inputMul, pArr, pInxVN, pOffset, pFldSeq);
if (OperGet() == GT_SUB)
{
inputMul = -inputMul;
}
AsOp()->gtOp2->ParseArrayAddressWork(comp, inputMul, pArr, pInxVN, pOffset, pFldSeq);
return;
case GT_MUL:
{
// If one op is a constant, continue parsing down.
target_ssize_t subMul = 0;
GenTree* nonConst = nullptr;
if (AsOp()->gtOp1->IsCnsIntOrI())
{
// If the other arg is an int constant, and is a "not-a-field", choose
// that as the multiplier, thus preserving constant index offsets...
if (AsOp()->gtOp2->OperGet() == GT_CNS_INT &&
AsOp()->gtOp2->AsIntCon()->gtFieldSeq == FieldSeqStore::NotAField())
{
assert(!AsOp()->gtOp2->AsIntCon()->ImmedValNeedsReloc(comp));
// TODO-CrossBitness: we wouldn't need the cast below if GenTreeIntConCommon::gtIconVal had
// target_ssize_t type.
subMul = (target_ssize_t)AsOp()->gtOp2->AsIntConCommon()->IconValue();
nonConst = AsOp()->gtOp1;
}
else
{
assert(!AsOp()->gtOp1->AsIntCon()->ImmedValNeedsReloc(comp));
// TODO-CrossBitness: we wouldn't need the cast below if GenTreeIntConCommon::gtIconVal had
// target_ssize_t type.
subMul = (target_ssize_t)AsOp()->gtOp1->AsIntConCommon()->IconValue();
nonConst = AsOp()->gtOp2;
}
}
else if (AsOp()->gtOp2->IsCnsIntOrI())
{
assert(!AsOp()->gtOp2->AsIntCon()->ImmedValNeedsReloc(comp));
// TODO-CrossBitness: we wouldn't need the cast below if GenTreeIntConCommon::gtIconVal had
// target_ssize_t type.
subMul = (target_ssize_t)AsOp()->gtOp2->AsIntConCommon()->IconValue();
nonConst = AsOp()->gtOp1;
}
if (nonConst != nullptr)
{
nonConst->ParseArrayAddressWork(comp, inputMul * subMul, pArr, pInxVN, pOffset, pFldSeq);
return;
}
// Otherwise, exit the switch, treat as a contribution to the index.
}
break;
case GT_LSH:
// If one op is a constant, continue parsing down.
if (AsOp()->gtOp2->IsCnsIntOrI())
{
assert(!AsOp()->gtOp2->AsIntCon()->ImmedValNeedsReloc(comp));
// TODO-CrossBitness: we wouldn't need the cast below if GenTreeIntCon::gtIconVal had target_ssize_t
// type.
target_ssize_t shiftVal = (target_ssize_t)AsOp()->gtOp2->AsIntConCommon()->IconValue();
target_ssize_t subMul = target_ssize_t{1} << shiftVal;
AsOp()->gtOp1->ParseArrayAddressWork(comp, inputMul * subMul, pArr, pInxVN, pOffset, pFldSeq);
return;
}
// Otherwise, exit the switch, treat as a contribution to the index.
break;
case GT_COMMA:
// We don't care about exceptions for this purpose.
if (AsOp()->gtOp1->OperIs(GT_BOUNDS_CHECK) || AsOp()->gtOp1->IsNothingNode())
{
AsOp()->gtOp2->ParseArrayAddressWork(comp, inputMul, pArr, pInxVN, pOffset, pFldSeq);
return;
}
break;
default:
break;
}
// If we didn't return above, must be a contribution to the non-constant part of the index VN.
ValueNum vn = comp->GetValueNumStore()->VNLiberalNormalValue(gtVNPair);
if (inputMul != 1)
{
ValueNum mulVN = comp->GetValueNumStore()->VNForLongCon(inputMul);
vn = comp->GetValueNumStore()->VNForFunc(TypeGet(), VNFunc(GT_MUL), mulVN, vn);
}
if (*pInxVN == ValueNumStore::NoVN)
{
*pInxVN = vn;
}
else
{
*pInxVN = comp->GetValueNumStore()->VNForFunc(TypeGet(), VNFunc(GT_ADD), *pInxVN, vn);
}
}
}
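//------------------------------------------------------------------------
// ParseArrayElemForm: check whether this indirection is an array element access,
//    and if so retrieve its array info
//
// Arguments:
//    comp      - compiler instance
//    arrayInfo - [out] information about the array access
//    pFldSeq   - [in, out] accumulated field sequence
//
// Return Value:
//    true if the tree was recognized as an array element access, false otherwise.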
bool GenTree::ParseArrayElemForm(Compiler* comp, ArrayInfo* arrayInfo, FieldSeqNode** pFldSeq)
{
if (OperIsIndir())
{
if (gtFlags & GTF_IND_ARR_INDEX)
{
bool b = comp->GetArrayInfoMap()->Lookup(this, arrayInfo);
assert(b);
return true;
}
// Otherwise...
GenTree* addr = AsIndir()->Addr();
return addr->ParseArrayElemAddrForm(comp, arrayInfo, pFldSeq);
}
else
{
return false;
}
}
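//------------------------------------------------------------------------
// ParseArrayElemAddrForm: check whether this tree computes the address of an array element,
//    and if so retrieve its array info
//
// Arguments:
//    comp      - compiler instance
//    arrayInfo - [out] information about the array access
//    pFldSeq   - [in, out] accumulated field sequence
//
// Return Value:
//    true if the tree was recognized as an array element address, false otherwise.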
bool GenTree::ParseArrayElemAddrForm(Compiler* comp, ArrayInfo* arrayInfo, FieldSeqNode** pFldSeq)
{
switch (OperGet())
{
case GT_ADD:
{
GenTree* arrAddr = nullptr;
GenTree* offset = nullptr;
if (AsOp()->gtOp1->TypeGet() == TYP_BYREF)
{
arrAddr = AsOp()->gtOp1;
offset = AsOp()->gtOp2;
}
else if (AsOp()->gtOp2->TypeGet() == TYP_BYREF)
{
arrAddr = AsOp()->gtOp2;
offset = AsOp()->gtOp1;
}
else
{
return false;
}
if (!offset->ParseOffsetForm(comp, pFldSeq))
{
return false;
}
return arrAddr->ParseArrayElemAddrForm(comp, arrayInfo, pFldSeq);
}
case GT_ADDR:
{
GenTree* addrArg = AsOp()->gtOp1;
if (addrArg->OperGet() != GT_IND)
{
return false;
}
else
{
// The "Addr" node might be annotated with a zero-offset field sequence.
FieldSeqNode* zeroOffsetFldSeq = nullptr;
if (comp->GetZeroOffsetFieldMap()->Lookup(this, &zeroOffsetFldSeq))
{
*pFldSeq = comp->GetFieldSeqStore()->Append(*pFldSeq, zeroOffsetFldSeq);
}
return addrArg->ParseArrayElemForm(comp, arrayInfo, pFldSeq);
}
}
default:
return false;
}
}
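//------------------------------------------------------------------------
// ParseOffsetForm: check whether this tree is a constant offset (a constant or a sum of
//    constants), accumulating any field sequence annotations along the way
//
// Arguments:
//    comp    - compiler instance
//    pFldSeq - [in, out] accumulated field sequence
//
// Return Value:
//    true if the tree has the expected offset form, false otherwise.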
bool GenTree::ParseOffsetForm(Compiler* comp, FieldSeqNode** pFldSeq)
{
switch (OperGet())
{
case GT_CNS_INT:
{
GenTreeIntCon* icon = AsIntCon();
*pFldSeq = comp->GetFieldSeqStore()->Append(*pFldSeq, icon->gtFieldSeq);
return true;
}
case GT_ADD:
if (!AsOp()->gtOp1->ParseOffsetForm(comp, pFldSeq))
{
return false;
}
return AsOp()->gtOp2->ParseOffsetForm(comp, pFldSeq);
default:
return false;
}
}
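//------------------------------------------------------------------------
// LabelIndex: label the nodes that contribute to an array index expression
//
// Arguments:
//    comp    - compiler instance
//    isConst - true if this subtree is a constant contribution to the index
//
// Notes:
//    Constant contributions are annotated with the ConstantIndex pseudo-field, locals are
//    marked with GTF_VAR_ARR_INDEX, and array lengths with GTF_ARRLEN_ARR_IDX.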
void GenTree::LabelIndex(Compiler* comp, bool isConst)
{
switch (OperGet())
{
case GT_CNS_INT:
// If we got here, this is a contribution to the constant part of the index.
if (isConst)
{
AsIntCon()->gtFieldSeq =
comp->GetFieldSeqStore()->CreateSingleton(FieldSeqStore::ConstantIndexPseudoField);
}
return;
case GT_LCL_VAR:
gtFlags |= GTF_VAR_ARR_INDEX;
return;
case GT_ADD:
case GT_SUB:
AsOp()->gtOp1->LabelIndex(comp, isConst);
AsOp()->gtOp2->LabelIndex(comp, isConst);
break;
case GT_CAST:
AsOp()->gtOp1->LabelIndex(comp, isConst);
break;
case GT_ARR_LENGTH:
gtFlags |= GTF_ARRLEN_ARR_IDX;
return;
default:
// For all other operators, peel off one constant; and then label the other if it's also a constant.
if (OperIsArithmetic() || OperIsCompare())
{
if (AsOp()->gtOp2->OperGet() == GT_CNS_INT)
{
AsOp()->gtOp1->LabelIndex(comp, isConst);
break;
}
else if (AsOp()->gtOp1->OperGet() == GT_CNS_INT)
{
AsOp()->gtOp2->LabelIndex(comp, isConst);
break;
}
// Otherwise continue downward on both, labeling vars.
AsOp()->gtOp1->LabelIndex(comp, false);
AsOp()->gtOp2->LabelIndex(comp, false);
}
break;
}
}
// Note that the value of the below field doesn't matter; it exists only to provide a distinguished address.
//
// static
FieldSeqNode FieldSeqStore::s_notAField(nullptr, nullptr);
// FieldSeqStore methods.
FieldSeqStore::FieldSeqStore(CompAllocator alloc) : m_alloc(alloc), m_canonMap(new (alloc) FieldSeqNodeCanonMap(alloc))
{
}
FieldSeqNode* FieldSeqStore::CreateSingleton(CORINFO_FIELD_HANDLE fieldHnd)
{
FieldSeqNode fsn(fieldHnd, nullptr);
FieldSeqNode* res = nullptr;
if (m_canonMap->Lookup(fsn, &res))
{
return res;
}
else
{
res = m_alloc.allocate<FieldSeqNode>(1);
*res = fsn;
m_canonMap->Set(fsn, res);
return res;
}
}
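// Append: concatenate the field sequence "b" onto "a", canonicalizing the result so that
// equal sequences share the same FieldSeqNode. Appending NotAField to anything yields NotAField.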
FieldSeqNode* FieldSeqStore::Append(FieldSeqNode* a, FieldSeqNode* b)
{
if (a == nullptr)
{
return b;
}
else if (a == NotAField())
{
return NotAField();
}
else if (b == nullptr)
{
return a;
}
else if (b == NotAField())
{
return NotAField();
        // Extremely special case for ConstantIndex pseudo-fields: appending two consecutive
        // ConstantIndex pseudo-fields collapses them into one.
}
else if (a->m_next == nullptr && a->m_fieldHnd == ConstantIndexPseudoField &&
b->m_fieldHnd == ConstantIndexPseudoField)
{
return b;
}
else
{
// We should never add a duplicate FieldSeqNode
assert(a != b);
FieldSeqNode* tmp = Append(a->m_next, b);
FieldSeqNode fsn(a->m_fieldHnd, tmp);
FieldSeqNode* res = nullptr;
if (m_canonMap->Lookup(fsn, &res))
{
return res;
}
else
{
res = m_alloc.allocate<FieldSeqNode>(1);
*res = fsn;
m_canonMap->Set(fsn, res);
return res;
}
}
}
// Static vars.
int FieldSeqStore::FirstElemPseudoFieldStruct;
int FieldSeqStore::ConstantIndexPseudoFieldStruct;
CORINFO_FIELD_HANDLE FieldSeqStore::FirstElemPseudoField =
(CORINFO_FIELD_HANDLE)&FieldSeqStore::FirstElemPseudoFieldStruct;
CORINFO_FIELD_HANDLE FieldSeqStore::ConstantIndexPseudoField =
(CORINFO_FIELD_HANDLE)&FieldSeqStore::ConstantIndexPseudoFieldStruct;
bool FieldSeqNode::IsFirstElemFieldSeq()
{
return m_fieldHnd == FieldSeqStore::FirstElemPseudoField;
}
bool FieldSeqNode::IsConstantIndexFieldSeq()
{
return m_fieldHnd == FieldSeqStore::ConstantIndexPseudoField;
}
bool FieldSeqNode::IsPseudoField() const
{
return m_fieldHnd == FieldSeqStore::FirstElemPseudoField || m_fieldHnd == FieldSeqStore::ConstantIndexPseudoField;
}
#ifdef FEATURE_SIMD
GenTreeSIMD* Compiler::gtNewSIMDNode(
var_types type, GenTree* op1, SIMDIntrinsicID simdIntrinsicID, CorInfoType simdBaseJitType, unsigned simdSize)
{
assert(op1 != nullptr);
SetOpLclRelatedToSIMDIntrinsic(op1);
GenTreeSIMD* simdNode = new (this, GT_SIMD)
GenTreeSIMD(type, getAllocator(CMK_ASTNode), op1, simdIntrinsicID, simdBaseJitType, simdSize);
return simdNode;
}
GenTreeSIMD* Compiler::gtNewSIMDNode(var_types type,
GenTree* op1,
GenTree* op2,
SIMDIntrinsicID simdIntrinsicID,
CorInfoType simdBaseJitType,
unsigned simdSize)
{
assert(op1 != nullptr);
SetOpLclRelatedToSIMDIntrinsic(op1);
SetOpLclRelatedToSIMDIntrinsic(op2);
GenTreeSIMD* simdNode = new (this, GT_SIMD)
GenTreeSIMD(type, getAllocator(CMK_ASTNode), op1, op2, simdIntrinsicID, simdBaseJitType, simdSize);
return simdNode;
}
//-------------------------------------------------------------------
// SetOpLclRelatedToSIMDIntrinsic: Determine if the tree has a local var that needs to be set
// as used by a SIMD intrinsic, and if so, set that local var appropriately.
//
// Arguments:
// op - The tree, to be an operand of a new GT_SIMD node, to check.
//
void Compiler::SetOpLclRelatedToSIMDIntrinsic(GenTree* op)
{
if (op == nullptr)
{
return;
}
if (op->OperIsLocal())
{
setLclRelatedToSIMDIntrinsic(op);
}
else if (op->OperIs(GT_OBJ))
{
GenTree* addr = op->AsIndir()->Addr();
if (addr->OperIs(GT_ADDR))
{
GenTree* addrOp1 = addr->AsOp()->gtGetOp1();
if (addrOp1->OperIsLocal())
{
setLclRelatedToSIMDIntrinsic(addrOp1);
}
}
}
}
bool GenTree::isCommutativeSIMDIntrinsic()
{
assert(gtOper == GT_SIMD);
switch (AsSIMD()->GetSIMDIntrinsicId())
{
case SIMDIntrinsicBitwiseAnd:
case SIMDIntrinsicBitwiseOr:
case SIMDIntrinsicEqual:
return true;
default:
return false;
}
}
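//------------------------------------------------------------------------
// ResetOperandArray: resize the node's operand array, reusing the inline storage when the
//    new operand count fits in it
//
// Arguments:
//    newOperandCount    - new number of operands
//    compiler           - compiler instance, used to allocate a larger array if needed
//    inlineOperands     - pointer to the node's inline operand storage
//    inlineOperandCount - number of inline operand slots
//
// Notes:
//    In debug builds the operand slots are cleared to nullptr.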
void GenTreeMultiOp::ResetOperandArray(size_t newOperandCount,
Compiler* compiler,
GenTree** inlineOperands,
size_t inlineOperandCount)
{
size_t oldOperandCount = GetOperandCount();
GenTree** oldOperands = GetOperandArray();
if (newOperandCount > oldOperandCount)
{
if (newOperandCount <= inlineOperandCount)
{
assert(oldOperandCount <= inlineOperandCount);
assert(oldOperands == inlineOperands);
}
else
{
// The most difficult case: we need to recreate the dynamic array.
assert(compiler != nullptr);
m_operands = compiler->getAllocator(CMK_ASTNode).allocate<GenTree*>(newOperandCount);
}
}
else
{
        // We are shrinking the array and may in the process switch to an inline representation.
        // We choose to do so for simplicity ("if a node has <= InlineOperandCount operands,
        // then it stores them inline"), but it may actually be more profitable not to do that:
        // it would save us a copy and a potential cache miss (though the latter seems unlikely).
if ((newOperandCount <= inlineOperandCount) && (oldOperands != inlineOperands))
{
m_operands = inlineOperands;
}
}
#ifdef DEBUG
for (size_t i = 0; i < newOperandCount; i++)
{
m_operands[i] = nullptr;
}
#endif // DEBUG
SetOperandCount(newOperandCount);
}
/* static */ bool GenTreeMultiOp::OperandsAreEqual(GenTreeMultiOp* op1, GenTreeMultiOp* op2)
{
if (op1->GetOperandCount() != op2->GetOperandCount())
{
return false;
}
for (size_t i = 1; i <= op1->GetOperandCount(); i++)
{
if (!Compare(op1->Op(i), op2->Op(i)))
{
return false;
}
}
return true;
}
void GenTreeMultiOp::InitializeOperands(GenTree** operands, size_t operandCount)
{
for (size_t i = 0; i < operandCount; i++)
{
m_operands[i] = operands[i];
gtFlags |= (operands[i]->gtFlags & GTF_ALL_EFFECT);
}
SetOperandCount(operandCount);
}
var_types GenTreeJitIntrinsic::GetAuxiliaryType() const
{
CorInfoType auxiliaryJitType = GetAuxiliaryJitType();
if (auxiliaryJitType == CORINFO_TYPE_UNDEF)
{
return TYP_UNKNOWN;
}
return JitType2PreciseVarType(auxiliaryJitType);
}
var_types GenTreeJitIntrinsic::GetSimdBaseType() const
{
CorInfoType simdBaseJitType = GetSimdBaseJitType();
if (simdBaseJitType == CORINFO_TYPE_UNDEF)
{
return TYP_UNKNOWN;
}
return JitType2PreciseVarType(simdBaseJitType);
}
// Returns true for the SIMD Intrinsic instructions that have MemoryLoad semantics, false otherwise
bool GenTreeSIMD::OperIsMemoryLoad() const
{
if (GetSIMDIntrinsicId() == SIMDIntrinsicInitArray)
{
return true;
}
return false;
}
// TODO-Review: why are layouts not compared here?
/* static */ bool GenTreeSIMD::Equals(GenTreeSIMD* op1, GenTreeSIMD* op2)
{
return (op1->TypeGet() == op2->TypeGet()) && (op1->GetSIMDIntrinsicId() == op2->GetSIMDIntrinsicId()) &&
(op1->GetSimdBaseType() == op2->GetSimdBaseType()) && (op1->GetSimdSize() == op2->GetSimdSize()) &&
OperandsAreEqual(op1, op2);
}
#endif // FEATURE_SIMD
#ifdef FEATURE_HW_INTRINSICS
bool GenTree::isCommutativeHWIntrinsic() const
{
assert(gtOper == GT_HWINTRINSIC);
#ifdef TARGET_XARCH
return HWIntrinsicInfo::IsCommutative(AsHWIntrinsic()->GetHWIntrinsicId());
#else
return false;
#endif // TARGET_XARCH
}
bool GenTree::isContainableHWIntrinsic() const
{
assert(gtOper == GT_HWINTRINSIC);
#ifdef TARGET_XARCH
switch (AsHWIntrinsic()->GetHWIntrinsicId())
{
case NI_SSE_LoadAlignedVector128:
case NI_SSE_LoadScalarVector128:
case NI_SSE_LoadVector128:
case NI_SSE2_LoadAlignedVector128:
case NI_SSE2_LoadScalarVector128:
case NI_SSE2_LoadVector128:
case NI_AVX_LoadAlignedVector256:
case NI_AVX_LoadVector256:
case NI_AVX_ExtractVector128:
case NI_AVX2_ExtractVector128:
{
return true;
}
default:
{
return false;
}
}
#elif TARGET_ARM64
switch (AsHWIntrinsic()->GetHWIntrinsicId())
{
case NI_Vector64_get_Zero:
case NI_Vector128_get_Zero:
{
return true;
}
default:
{
return false;
}
}
#else
return false;
#endif // TARGET_XARCH
}
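// Returns true if this hardware intrinsic has read-modify-write semantics on the target,
// i.e. the destination register must also be one of the source operands.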
bool GenTree::isRMWHWIntrinsic(Compiler* comp)
{
assert(gtOper == GT_HWINTRINSIC);
assert(comp != nullptr);
#if defined(TARGET_XARCH)
if (!comp->canUseVexEncoding())
{
return HWIntrinsicInfo::HasRMWSemantics(AsHWIntrinsic()->GetHWIntrinsicId());
}
switch (AsHWIntrinsic()->GetHWIntrinsicId())
{
// TODO-XArch-Cleanup: Move this switch block to be table driven.
case NI_SSE42_Crc32:
case NI_SSE42_X64_Crc32:
case NI_FMA_MultiplyAdd:
case NI_FMA_MultiplyAddNegated:
case NI_FMA_MultiplyAddNegatedScalar:
case NI_FMA_MultiplyAddScalar:
case NI_FMA_MultiplyAddSubtract:
case NI_FMA_MultiplySubtract:
case NI_FMA_MultiplySubtractAdd:
case NI_FMA_MultiplySubtractNegated:
case NI_FMA_MultiplySubtractNegatedScalar:
case NI_FMA_MultiplySubtractScalar:
{
return true;
}
default:
{
return false;
}
}
#elif defined(TARGET_ARM64)
return HWIntrinsicInfo::HasRMWSemantics(AsHWIntrinsic()->GetHWIntrinsicId());
#else
return false;
#endif
}
GenTreeHWIntrinsic* Compiler::gtNewSimdHWIntrinsicNode(var_types type,
NamedIntrinsic hwIntrinsicID,
CorInfoType simdBaseJitType,
unsigned simdSize,
bool isSimdAsHWIntrinsic)
{
return new (this, GT_HWINTRINSIC) GenTreeHWIntrinsic(type, getAllocator(CMK_ASTNode), hwIntrinsicID,
simdBaseJitType, simdSize, isSimdAsHWIntrinsic);
}
GenTreeHWIntrinsic* Compiler::gtNewSimdHWIntrinsicNode(var_types type,
GenTree* op1,
NamedIntrinsic hwIntrinsicID,
CorInfoType simdBaseJitType,
unsigned simdSize,
bool isSimdAsHWIntrinsic)
{
SetOpLclRelatedToSIMDIntrinsic(op1);
return new (this, GT_HWINTRINSIC) GenTreeHWIntrinsic(type, getAllocator(CMK_ASTNode), hwIntrinsicID,
simdBaseJitType, simdSize, isSimdAsHWIntrinsic, op1);
}
GenTreeHWIntrinsic* Compiler::gtNewSimdHWIntrinsicNode(var_types type,
GenTree* op1,
GenTree* op2,
NamedIntrinsic hwIntrinsicID,
CorInfoType simdBaseJitType,
unsigned simdSize,
bool isSimdAsHWIntrinsic)
{
SetOpLclRelatedToSIMDIntrinsic(op1);
SetOpLclRelatedToSIMDIntrinsic(op2);
return new (this, GT_HWINTRINSIC) GenTreeHWIntrinsic(type, getAllocator(CMK_ASTNode), hwIntrinsicID,
simdBaseJitType, simdSize, isSimdAsHWIntrinsic, op1, op2);
}
GenTreeHWIntrinsic* Compiler::gtNewSimdHWIntrinsicNode(var_types type,
GenTree* op1,
GenTree* op2,
GenTree* op3,
NamedIntrinsic hwIntrinsicID,
CorInfoType simdBaseJitType,
unsigned simdSize,
bool isSimdAsHWIntrinsic)
{
SetOpLclRelatedToSIMDIntrinsic(op1);
SetOpLclRelatedToSIMDIntrinsic(op2);
SetOpLclRelatedToSIMDIntrinsic(op3);
return new (this, GT_HWINTRINSIC) GenTreeHWIntrinsic(type, getAllocator(CMK_ASTNode), hwIntrinsicID,
simdBaseJitType, simdSize, isSimdAsHWIntrinsic, op1, op2, op3);
}
GenTreeHWIntrinsic* Compiler::gtNewSimdHWIntrinsicNode(var_types type,
GenTree* op1,
GenTree* op2,
GenTree* op3,
GenTree* op4,
NamedIntrinsic hwIntrinsicID,
CorInfoType simdBaseJitType,
unsigned simdSize,
bool isSimdAsHWIntrinsic)
{
SetOpLclRelatedToSIMDIntrinsic(op1);
SetOpLclRelatedToSIMDIntrinsic(op2);
SetOpLclRelatedToSIMDIntrinsic(op3);
SetOpLclRelatedToSIMDIntrinsic(op4);
return new (this, GT_HWINTRINSIC)
GenTreeHWIntrinsic(type, getAllocator(CMK_ASTNode), hwIntrinsicID, simdBaseJitType, simdSize,
isSimdAsHWIntrinsic, op1, op2, op3, op4);
}
GenTreeHWIntrinsic* Compiler::gtNewSimdHWIntrinsicNode(var_types type,
GenTree** operands,
size_t operandCount,
NamedIntrinsic hwIntrinsicID,
CorInfoType simdBaseJitType,
unsigned simdSize,
bool isSimdAsHWIntrinsic)
{
IntrinsicNodeBuilder nodeBuilder(getAllocator(CMK_ASTNode), operandCount);
for (size_t i = 0; i < operandCount; i++)
{
nodeBuilder.AddOperand(i, operands[i]);
SetOpLclRelatedToSIMDIntrinsic(operands[i]);
}
return new (this, GT_HWINTRINSIC)
GenTreeHWIntrinsic(type, std::move(nodeBuilder), hwIntrinsicID, simdBaseJitType, simdSize, isSimdAsHWIntrinsic);
}
GenTreeHWIntrinsic* Compiler::gtNewSimdHWIntrinsicNode(var_types type,
IntrinsicNodeBuilder&& nodeBuilder,
NamedIntrinsic hwIntrinsicID,
CorInfoType simdBaseJitType,
unsigned simdSize,
bool isSimdAsHWIntrinsic)
{
for (size_t i = 0; i < nodeBuilder.GetOperandCount(); i++)
{
SetOpLclRelatedToSIMDIntrinsic(nodeBuilder.GetOperand(i));
}
return new (this, GT_HWINTRINSIC)
GenTreeHWIntrinsic(type, std::move(nodeBuilder), hwIntrinsicID, simdBaseJitType, simdSize, isSimdAsHWIntrinsic);
}
GenTree* Compiler::gtNewSimdAbsNode(
var_types type, GenTree* op1, CorInfoType simdBaseJitType, unsigned simdSize, bool isSimdAsHWIntrinsic)
{
assert(IsBaselineSimdIsaSupportedDebugOnly());
assert(varTypeIsSIMD(type));
assert(getSIMDTypeForSize(simdSize) == type);
assert(op1 != nullptr);
assert(op1->TypeGet() == type);
var_types simdBaseType = JitType2PreciseVarType(simdBaseJitType);
assert(varTypeIsArithmetic(simdBaseType));
if (varTypeIsUnsigned(simdBaseType))
{
return op1;
}
#if defined(TARGET_XARCH)
if (varTypeIsFloating(simdBaseType))
{
// Abs(v) = v & ~new vector<T>(-0.0);
assert((simdSize != 32) || compIsaSupportedDebugOnly(InstructionSet_AVX));
GenTree* bitMask = gtNewDconNode(-0.0, simdBaseType);
bitMask = gtNewSimdCreateBroadcastNode(type, bitMask, simdBaseJitType, simdSize, isSimdAsHWIntrinsic);
return gtNewSimdBinOpNode(GT_AND_NOT, type, op1, bitMask, simdBaseJitType, simdSize, isSimdAsHWIntrinsic);
}
assert((simdSize != 32) || compIsaSupportedDebugOnly(InstructionSet_AVX2));
if ((simdBaseType != TYP_LONG) && ((simdSize == 32) || compOpportunisticallyDependsOn(InstructionSet_SSSE3)))
{
NamedIntrinsic intrinsic = (simdSize == 32) ? NI_AVX2_Abs : NI_SSSE3_Abs;
return gtNewSimdHWIntrinsicNode(type, op1, intrinsic, simdBaseJitType, simdSize, isSimdAsHWIntrinsic);
}
else
{
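        // Abs(v) = ConditionalSelect(v < Zero, Zero - v, v)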
GenTree* tmp;
CORINFO_CLASS_HANDLE clsHnd = gtGetStructHandleForSIMD(type, simdBaseJitType);
GenTree* op1Dup1;
op1 = impCloneExpr(op1, &op1Dup1, clsHnd, (unsigned)CHECK_SPILL_ALL,
nullptr DEBUGARG("Clone op1 for vector abs"));
GenTree* op1Dup2;
op1Dup1 = impCloneExpr(op1Dup1, &op1Dup2, clsHnd, (unsigned)CHECK_SPILL_ALL,
nullptr DEBUGARG("Clone op1 for vector abs"));
// op1 = op1 < Zero
tmp = gtNewSimdZeroNode(type, simdBaseJitType, simdSize, isSimdAsHWIntrinsic);
op1 = gtNewSimdCmpOpNode(GT_LT, type, op1, tmp, simdBaseJitType, simdSize, isSimdAsHWIntrinsic);
// tmp = Zero - op1Dup1
tmp = gtNewSimdZeroNode(type, simdBaseJitType, simdSize, isSimdAsHWIntrinsic);
tmp = gtNewSimdBinOpNode(GT_SUB, type, tmp, op1Dup1, simdBaseJitType, simdSize, isSimdAsHWIntrinsic);
// result = ConditionalSelect(op1, tmp, op1Dup2)
return gtNewSimdCndSelNode(type, op1, tmp, op1Dup2, simdBaseJitType, simdSize, isSimdAsHWIntrinsic);
}
#elif defined(TARGET_ARM64)
NamedIntrinsic intrinsic = NI_AdvSimd_Abs;
if (simdBaseType == TYP_DOUBLE)
{
intrinsic = (simdSize == 8) ? NI_AdvSimd_AbsScalar : NI_AdvSimd_Arm64_Abs;
}
else if (varTypeIsLong(simdBaseType))
{
intrinsic = (simdSize == 8) ? NI_AdvSimd_Arm64_AbsScalar : NI_AdvSimd_Arm64_Abs;
}
assert(intrinsic != NI_Illegal);
return gtNewSimdHWIntrinsicNode(type, op1, intrinsic, simdBaseJitType, simdSize, isSimdAsHWIntrinsic);
#else
#error Unsupported platform
#endif
}
GenTree* Compiler::gtNewSimdBinOpNode(genTreeOps op,
var_types type,
GenTree* op1,
GenTree* op2,
CorInfoType simdBaseJitType,
unsigned simdSize,
bool isSimdAsHWIntrinsic)
{
assert(IsBaselineSimdIsaSupportedDebugOnly());
assert(varTypeIsSIMD(type));
assert(getSIMDTypeForSize(simdSize) == type);
var_types simdBaseType = JitType2PreciseVarType(simdBaseJitType);
assert(varTypeIsArithmetic(simdBaseType));
assert(op1 != nullptr);
assert(op1->TypeIs(type, simdBaseType, genActualType(simdBaseType)));
assert(op2 != nullptr);
if ((op == GT_LSH) || (op == GT_RSH) || (op == GT_RSZ))
{
assert(op2->TypeIs(TYP_INT));
}
else
{
assert(op2->TypeIs(type, simdBaseType, genActualType(simdBaseType)));
}
NamedIntrinsic intrinsic = NI_Illegal;
CORINFO_CLASS_HANDLE clsHnd = gtGetStructHandleForSIMD(type, simdBaseJitType);
switch (op)
{
#if defined(TARGET_XARCH)
case GT_ADD:
{
if (simdSize == 32)
{
assert(compIsaSupportedDebugOnly(InstructionSet_AVX));
if (varTypeIsFloating(simdBaseType))
{
intrinsic = NI_AVX_Add;
}
else
{
assert(compIsaSupportedDebugOnly(InstructionSet_AVX2));
intrinsic = NI_AVX2_Add;
}
}
else if (simdBaseType == TYP_FLOAT)
{
intrinsic = NI_SSE_Add;
}
else
{
intrinsic = NI_SSE2_Add;
}
break;
}
case GT_AND:
{
if (simdSize == 32)
{
assert(compIsaSupportedDebugOnly(InstructionSet_AVX));
if (varTypeIsFloating(simdBaseType))
{
intrinsic = NI_AVX_And;
}
else if (compOpportunisticallyDependsOn(InstructionSet_AVX2))
{
intrinsic = NI_AVX2_And;
}
else
{
// Since this is a bitwise operation, we can still support it by lying
// about the type and doing the operation using a supported instruction
intrinsic = NI_AVX_And;
simdBaseJitType = CORINFO_TYPE_FLOAT;
}
}
else if (simdBaseType == TYP_FLOAT)
{
intrinsic = NI_SSE_And;
}
else
{
intrinsic = NI_SSE2_And;
}
break;
}
case GT_AND_NOT:
{
if (simdSize == 32)
{
assert(compIsaSupportedDebugOnly(InstructionSet_AVX));
if (varTypeIsFloating(simdBaseType))
{
intrinsic = NI_AVX_AndNot;
}
else if (compOpportunisticallyDependsOn(InstructionSet_AVX2))
{
intrinsic = NI_AVX2_AndNot;
}
else
{
// Since this is a bitwise operation, we can still support it by lying
// about the type and doing the operation using a supported instruction
intrinsic = NI_AVX_AndNot;
simdBaseJitType = CORINFO_TYPE_FLOAT;
}
}
else if (simdBaseType == TYP_FLOAT)
{
intrinsic = NI_SSE_AndNot;
}
else
{
intrinsic = NI_SSE2_AndNot;
}
// GT_AND_NOT expects `op1 & ~op2`, but xarch does `~op1 & op2`
std::swap(op1, op2);
break;
}
case GT_DIV:
{
// TODO-XARCH-CQ: We could support division by constant for integral types
assert(varTypeIsFloating(simdBaseType));
if (simdSize == 32)
{
assert(compIsaSupportedDebugOnly(InstructionSet_AVX));
intrinsic = NI_AVX_Divide;
}
else if (simdBaseType == TYP_FLOAT)
{
intrinsic = NI_SSE_Divide;
}
else
{
intrinsic = NI_SSE2_Divide;
}
break;
}
case GT_LSH:
case GT_RSH:
case GT_RSZ:
{
assert(!varTypeIsByte(simdBaseType));
assert(!varTypeIsFloating(simdBaseType));
            assert((op != GT_RSH) || !varTypeIsUnsigned(simdBaseType));
            // "Over shifting" is platform specific behavior. We will match the C# behavior:
            // this requires that we mask with (sizeof(T) * 8) - 1, which ensures the shift cannot
            // exceed the number of bits available in `T`. This is roughly equivalent to
            // x % (sizeof(T) * 8), but that is "more expensive" and is only the same for unsigned
            // inputs, whereas we have a signed input and so negative values would differ.
unsigned shiftCountMask = (genTypeSize(simdBaseType) * 8) - 1;
if (op2->IsCnsIntOrI())
{
op2->AsIntCon()->gtIconVal &= shiftCountMask;
}
else
{
op2 = gtNewOperNode(GT_AND, TYP_INT, op2, gtNewIconNode(shiftCountMask));
op2 = gtNewSimdHWIntrinsicNode(TYP_SIMD16, op2, NI_SSE2_ConvertScalarToVector128Int32, CORINFO_TYPE_INT,
16, isSimdAsHWIntrinsic);
}
if (simdSize == 32)
{
assert(compIsaSupportedDebugOnly(InstructionSet_AVX2));
if (op == GT_LSH)
{
intrinsic = NI_AVX2_ShiftLeftLogical;
}
else if (op == GT_RSH)
{
intrinsic = NI_AVX2_ShiftRightArithmetic;
}
else
{
assert(op == GT_RSZ);
intrinsic = NI_AVX2_ShiftRightLogical;
}
}
else if (op == GT_LSH)
{
intrinsic = NI_SSE2_ShiftLeftLogical;
}
else if (op == GT_RSH)
{
intrinsic = NI_SSE2_ShiftRightArithmetic;
}
else
{
assert(op == GT_RSZ);
intrinsic = NI_SSE2_ShiftRightLogical;
}
break;
}
case GT_MUL:
{
GenTree** broadcastOp = nullptr;
if (varTypeIsArithmetic(op1))
{
broadcastOp = &op1;
}
else if (varTypeIsArithmetic(op2))
{
broadcastOp = &op2;
}
if (broadcastOp != nullptr)
{
*broadcastOp =
gtNewSimdCreateBroadcastNode(type, *broadcastOp, simdBaseJitType, simdSize, isSimdAsHWIntrinsic);
}
switch (simdBaseType)
{
case TYP_SHORT:
case TYP_USHORT:
{
if (simdSize == 32)
{
assert(compIsaSupportedDebugOnly(InstructionSet_AVX2));
intrinsic = NI_AVX2_MultiplyLow;
}
else
{
intrinsic = NI_SSE2_MultiplyLow;
}
break;
}
case TYP_INT:
case TYP_UINT:
{
if (simdSize == 32)
{
assert(compIsaSupportedDebugOnly(InstructionSet_AVX2));
intrinsic = NI_AVX2_MultiplyLow;
}
else if (compOpportunisticallyDependsOn(InstructionSet_SSE41))
{
intrinsic = NI_SSE41_MultiplyLow;
}
else
{
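                        // No SSE4.1 MultiplyLow available: emulate the element-wise 32-bit multiply
                        // with two 32x32->64 widening multiplies (Sse2.Multiply), shuffles to gather
                        // the low 32 bits of each product, and an unpack to interleave the results.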
// op1Dup = op1
GenTree* op1Dup;
op1 = impCloneExpr(op1, &op1Dup, clsHnd, (unsigned)CHECK_SPILL_ALL,
nullptr DEBUGARG("Clone op1 for vector multiply"));
// op2Dup = op2
GenTree* op2Dup;
op2 = impCloneExpr(op2, &op2Dup, clsHnd, (unsigned)CHECK_SPILL_ALL,
nullptr DEBUGARG("Clone op2 for vector multiply"));
// op1 = Sse2.ShiftRightLogical128BitLane(op1, 4)
op1 = gtNewSimdHWIntrinsicNode(type, op1, gtNewIconNode(4, TYP_INT),
NI_SSE2_ShiftRightLogical128BitLane, simdBaseJitType, simdSize,
isSimdAsHWIntrinsic);
                            // op2 = Sse2.ShiftRightLogical128BitLane(op2, 4)
op2 = gtNewSimdHWIntrinsicNode(type, op2, gtNewIconNode(4, TYP_INT),
NI_SSE2_ShiftRightLogical128BitLane, simdBaseJitType, simdSize,
isSimdAsHWIntrinsic);
// op2 = Sse2.Multiply(op2.AsUInt32(), op1.AsUInt32()).AsInt32()
op2 = gtNewSimdHWIntrinsicNode(type, op2, op1, NI_SSE2_Multiply, CORINFO_TYPE_ULONG, simdSize,
isSimdAsHWIntrinsic);
// op2 = Sse2.Shuffle(op2, (0, 0, 2, 0))
op2 = gtNewSimdHWIntrinsicNode(type, op2, gtNewIconNode(SHUFFLE_XXZX, TYP_INT), NI_SSE2_Shuffle,
simdBaseJitType, simdSize, isSimdAsHWIntrinsic);
// op1 = Sse2.Multiply(op1Dup.AsUInt32(), op2Dup.AsUInt32()).AsInt32()
op1 = gtNewSimdHWIntrinsicNode(type, op1Dup, op2Dup, NI_SSE2_Multiply, CORINFO_TYPE_ULONG,
simdSize, isSimdAsHWIntrinsic);
// op1 = Sse2.Shuffle(op1, (0, 0, 2, 0))
op1 = gtNewSimdHWIntrinsicNode(type, op1, gtNewIconNode(SHUFFLE_XXZX, TYP_INT), NI_SSE2_Shuffle,
simdBaseJitType, simdSize, isSimdAsHWIntrinsic);
// result = Sse2.UnpackLow(op1, op2)
intrinsic = NI_SSE2_UnpackLow;
}
break;
}
case TYP_FLOAT:
{
if (simdSize == 32)
{
assert(compIsaSupportedDebugOnly(InstructionSet_AVX));
intrinsic = NI_AVX_Multiply;
}
else
{
intrinsic = NI_SSE_Multiply;
}
break;
}
case TYP_DOUBLE:
{
if (simdSize == 32)
{
assert(compIsaSupportedDebugOnly(InstructionSet_AVX));
intrinsic = NI_AVX_Multiply;
}
else
{
intrinsic = NI_SSE2_Multiply;
}
break;
}
default:
{
unreached();
}
}
break;
}
case GT_OR:
{
if (simdSize == 32)
{
assert(compIsaSupportedDebugOnly(InstructionSet_AVX));
if (varTypeIsFloating(simdBaseType))
{
intrinsic = NI_AVX_Or;
}
else if (compOpportunisticallyDependsOn(InstructionSet_AVX2))
{
intrinsic = NI_AVX2_Or;
}
else
{
// Since this is a bitwise operation, we can still support it by lying
// about the type and doing the operation using a supported instruction
intrinsic = NI_AVX_Or;
simdBaseJitType = CORINFO_TYPE_FLOAT;
}
}
else if (simdBaseType == TYP_FLOAT)
{
intrinsic = NI_SSE_Or;
}
else
{
intrinsic = NI_SSE2_Or;
}
break;
}
case GT_SUB:
{
if (simdSize == 32)
{
assert(compIsaSupportedDebugOnly(InstructionSet_AVX));
if (varTypeIsFloating(simdBaseType))
{
intrinsic = NI_AVX_Subtract;
}
else
{
assert(compIsaSupportedDebugOnly(InstructionSet_AVX2));
intrinsic = NI_AVX2_Subtract;
}
}
else if (simdBaseType == TYP_FLOAT)
{
intrinsic = NI_SSE_Subtract;
}
else
{
intrinsic = NI_SSE2_Subtract;
}
break;
}
case GT_XOR:
{
if (simdSize == 32)
{
assert(compIsaSupportedDebugOnly(InstructionSet_AVX));
if (varTypeIsFloating(simdBaseType))
{
intrinsic = NI_AVX_Xor;
}
else if (compOpportunisticallyDependsOn(InstructionSet_AVX2))
{
intrinsic = NI_AVX2_Xor;
}
else
{
// Since this is a bitwise operation, we can still support it by lying
// about the type and doing the operation using a supported instruction
intrinsic = NI_AVX_Xor;
simdBaseJitType = CORINFO_TYPE_FLOAT;
}
}
else if (simdBaseType == TYP_FLOAT)
{
intrinsic = NI_SSE_Xor;
}
else
{
intrinsic = NI_SSE2_Xor;
}
break;
}
#elif defined(TARGET_ARM64)
case GT_ADD:
{
if (simdBaseType == TYP_DOUBLE)
{
intrinsic = (simdSize == 8) ? NI_AdvSimd_AddScalar : NI_AdvSimd_Arm64_Add;
}
else if ((simdSize == 8) && varTypeIsLong(simdBaseType))
{
intrinsic = NI_AdvSimd_AddScalar;
}
else
{
intrinsic = NI_AdvSimd_Add;
}
break;
}
case GT_AND:
{
intrinsic = NI_AdvSimd_And;
break;
}
case GT_AND_NOT:
{
intrinsic = NI_AdvSimd_BitwiseClear;
break;
}
case GT_DIV:
{
// TODO-AARCH-CQ: We could support division by constant for integral types
assert(varTypeIsFloating(simdBaseType));
if ((simdSize == 8) && (simdBaseType == TYP_DOUBLE))
{
intrinsic = NI_AdvSimd_DivideScalar;
}
else
{
intrinsic = NI_AdvSimd_Arm64_Divide;
}
break;
}
case GT_LSH:
case GT_RSH:
case GT_RSZ:
{
assert(!varTypeIsFloating(simdBaseType));
            assert((op != GT_RSH) || !varTypeIsUnsigned(simdBaseType));
            // "Over shifting" is platform specific behavior. We will match the C# behavior:
            // this requires that we mask with (sizeof(T) * 8) - 1, which ensures the shift cannot
            // exceed the number of bits available in `T`. This is roughly equivalent to
            // x % (sizeof(T) * 8), but that is "more expensive" and is only the same for unsigned
            // inputs, whereas we have a signed input and so negative values would differ.
unsigned shiftCountMask = (genTypeSize(simdBaseType) * 8) - 1;
if (op2->IsCnsIntOrI())
{
op2->AsIntCon()->gtIconVal &= shiftCountMask;
if ((simdSize == 8) && varTypeIsLong(simdBaseType))
{
if (op == GT_LSH)
{
intrinsic = NI_AdvSimd_ShiftLeftLogicalScalar;
}
else if (op == GT_RSH)
{
intrinsic = NI_AdvSimd_ShiftRightArithmeticScalar;
}
else
{
assert(op == GT_RSZ);
intrinsic = NI_AdvSimd_ShiftRightLogicalScalar;
}
}
else if (op == GT_LSH)
{
intrinsic = NI_AdvSimd_ShiftLeftLogical;
}
else if (op == GT_RSH)
{
intrinsic = NI_AdvSimd_ShiftRightArithmetic;
}
else
{
assert(op == GT_RSZ);
intrinsic = NI_AdvSimd_ShiftRightLogical;
}
}
else
{
op2 = gtNewOperNode(GT_AND, TYP_INT, op2, gtNewIconNode(shiftCountMask));
if (op != GT_LSH)
{
op2 = gtNewOperNode(GT_NEG, TYP_INT, op2);
}
op2 = gtNewSimdCreateBroadcastNode(type, op2, simdBaseJitType, simdSize, isSimdAsHWIntrinsic);
if ((simdSize == 8) && varTypeIsLong(simdBaseType))
{
if (op == GT_LSH)
{
intrinsic = NI_AdvSimd_ShiftLogicalScalar;
}
else if (op == GT_RSH)
{
intrinsic = NI_AdvSimd_ShiftArithmeticScalar;
}
else
                    {
                        assert(op == GT_RSZ);
                        intrinsic = NI_AdvSimd_ShiftLogicalScalar;
}
}
else if (op == GT_LSH)
{
intrinsic = NI_AdvSimd_ShiftLogical;
}
else if (op == GT_RSH)
{
intrinsic = NI_AdvSimd_ShiftArithmetic;
}
else
{
assert(op == GT_RSZ);
intrinsic = NI_AdvSimd_ShiftLogical;
}
}
break;
}
case GT_MUL:
{
assert(!varTypeIsLong(simdBaseType));
GenTree** scalarOp = nullptr;
if (varTypeIsArithmetic(op1))
{
// MultiplyByScalar requires the scalar op to be op2
std::swap(op1, op2);
scalarOp = &op2;
}
else if (varTypeIsArithmetic(op2))
{
scalarOp = &op2;
}
switch (JitType2PreciseVarType(simdBaseJitType))
{
case TYP_BYTE:
case TYP_UBYTE:
{
if (scalarOp != nullptr)
{
*scalarOp = gtNewSimdCreateBroadcastNode(type, *scalarOp, simdBaseJitType, simdSize,
isSimdAsHWIntrinsic);
}
intrinsic = NI_AdvSimd_Multiply;
break;
}
case TYP_SHORT:
case TYP_USHORT:
case TYP_INT:
case TYP_UINT:
case TYP_FLOAT:
{
if (scalarOp != nullptr)
{
intrinsic = NI_AdvSimd_MultiplyByScalar;
*scalarOp = gtNewSimdHWIntrinsicNode(TYP_SIMD8, *scalarOp, NI_Vector64_CreateScalarUnsafe,
simdBaseJitType, 8, isSimdAsHWIntrinsic);
}
else
{
intrinsic = NI_AdvSimd_Multiply;
}
break;
}
case TYP_DOUBLE:
{
if (scalarOp != nullptr)
{
intrinsic = NI_AdvSimd_Arm64_MultiplyByScalar;
*scalarOp = gtNewSimdHWIntrinsicNode(TYP_SIMD8, *scalarOp, NI_Vector64_Create, simdBaseJitType,
8, isSimdAsHWIntrinsic);
}
else
{
intrinsic = NI_AdvSimd_Arm64_Multiply;
}
if (simdSize == 8)
{
intrinsic = NI_AdvSimd_MultiplyScalar;
}
break;
}
default:
{
unreached();
}
}
break;
}
case GT_OR:
{
intrinsic = NI_AdvSimd_Or;
break;
}
case GT_SUB:
{
if (simdBaseType == TYP_DOUBLE)
{
intrinsic = (simdSize == 8) ? NI_AdvSimd_SubtractScalar : NI_AdvSimd_Arm64_Subtract;
}
else if ((simdSize == 8) && varTypeIsLong(simdBaseType))
{
intrinsic = NI_AdvSimd_SubtractScalar;
}
else
{
intrinsic = NI_AdvSimd_Subtract;
}
break;
}
case GT_XOR:
{
intrinsic = NI_AdvSimd_Xor;
break;
}
#else
#error Unsupported platform
#endif // !TARGET_XARCH && !TARGET_ARM64
default:
{
unreached();
}
}
assert(intrinsic != NI_Illegal);
return gtNewSimdHWIntrinsicNode(type, op1, op2, intrinsic, simdBaseJitType, simdSize, isSimdAsHWIntrinsic);
}
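// gtNewSimdCeilNode: Creates a node that computes the per-element ceiling (round toward
// positive infinity) of a floating-point vector.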
GenTree* Compiler::gtNewSimdCeilNode(
var_types type, GenTree* op1, CorInfoType simdBaseJitType, unsigned simdSize, bool isSimdAsHWIntrinsic)
{
assert(IsBaselineSimdIsaSupportedDebugOnly());
assert(varTypeIsSIMD(type));
assert(getSIMDTypeForSize(simdSize) == type);
assert(op1 != nullptr);
assert(op1->TypeIs(type));
var_types simdBaseType = JitType2PreciseVarType(simdBaseJitType);
assert(varTypeIsFloating(simdBaseType));
NamedIntrinsic intrinsic = NI_Illegal;
#if defined(TARGET_XARCH)
if (simdSize == 32)
{
assert(compIsaSupportedDebugOnly(InstructionSet_AVX));
intrinsic = NI_AVX_Ceiling;
}
else
{
assert(compIsaSupportedDebugOnly(InstructionSet_SSE41));
intrinsic = NI_SSE41_Ceiling;
}
#elif defined(TARGET_ARM64)
if (simdBaseType == TYP_DOUBLE)
{
intrinsic = (simdSize == 8) ? NI_AdvSimd_CeilingScalar : NI_AdvSimd_Arm64_Ceiling;
}
else
{
intrinsic = NI_AdvSimd_Ceiling;
}
#else
#error Unsupported platform
#endif // !TARGET_XARCH && !TARGET_ARM64
assert(intrinsic != NI_Illegal);
return gtNewSimdHWIntrinsicNode(type, op1, intrinsic, simdBaseJitType, simdSize, isSimdAsHWIntrinsic);
}
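// gtNewSimdCmpOpNode: Creates a node that performs the per-element comparison `op`
// (EQ, GE, GT, LE, or LT) of two vectors, producing all-bits-set for elements where the
// comparison holds and zero otherwise.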
GenTree* Compiler::gtNewSimdCmpOpNode(genTreeOps op,
var_types type,
GenTree* op1,
GenTree* op2,
CorInfoType simdBaseJitType,
unsigned simdSize,
bool isSimdAsHWIntrinsic)
{
assert(IsBaselineSimdIsaSupportedDebugOnly());
assert(varTypeIsSIMD(type));
assert(getSIMDTypeForSize(simdSize) == type);
assert(op1 != nullptr);
assert(op1->TypeIs(type));
assert(op2 != nullptr);
assert(op2->TypeIs(type));
var_types simdBaseType = JitType2PreciseVarType(simdBaseJitType);
assert(varTypeIsArithmetic(simdBaseType));
NamedIntrinsic intrinsic = NI_Illegal;
CORINFO_CLASS_HANDLE clsHnd = gtGetStructHandleForSIMD(type, simdBaseJitType);
switch (op)
{
#if defined(TARGET_XARCH)
case GT_EQ:
{
if (simdSize == 32)
{
assert(compIsaSupportedDebugOnly(InstructionSet_AVX));
if (varTypeIsFloating(simdBaseType))
{
intrinsic = NI_AVX_CompareEqual;
}
else
{
assert(compIsaSupportedDebugOnly(InstructionSet_AVX2));
intrinsic = NI_AVX2_CompareEqual;
}
}
else if (simdBaseType == TYP_FLOAT)
{
intrinsic = NI_SSE_CompareEqual;
}
else if (varTypeIsLong(simdBaseType))
{
if (compOpportunisticallyDependsOn(InstructionSet_SSE41))
{
intrinsic = NI_SSE41_CompareEqual;
}
else
{
// There is no direct SSE2 support for comparing TYP_LONG vectors.
// These have to be implemented in terms of TYP_INT vector comparison operations.
//
// tmp = (op1 == op2) i.e. compare for equality as if op1 and op2 are vector of int
// op1 = tmp
// op2 = Shuffle(tmp, (2, 3, 0, 1))
// result = BitwiseAnd(op1, op2)
//
// Shuffle is meant to swap the comparison results of low-32-bits and high 32-bits of
// respective long elements.
GenTree* tmp =
gtNewSimdCmpOpNode(op, type, op1, op2, CORINFO_TYPE_INT, simdSize, isSimdAsHWIntrinsic);
tmp = impCloneExpr(tmp, &op1, clsHnd, (unsigned)CHECK_SPILL_ALL,
nullptr DEBUGARG("Clone tmp for vector Equals"));
op2 = gtNewSimdHWIntrinsicNode(type, tmp, gtNewIconNode(SHUFFLE_ZWXY), NI_SSE2_Shuffle,
CORINFO_TYPE_INT, simdSize, isSimdAsHWIntrinsic);
return gtNewSimdBinOpNode(GT_AND, type, op1, op2, simdBaseJitType, simdSize, isSimdAsHWIntrinsic);
}
}
else
{
intrinsic = NI_SSE2_CompareEqual;
}
break;
}
case GT_GE:
{
if (simdSize == 32)
{
assert(compIsaSupportedDebugOnly(InstructionSet_AVX));
if (varTypeIsFloating(simdBaseType))
{
intrinsic = NI_AVX_CompareGreaterThanOrEqual;
}
}
else if (simdBaseType == TYP_FLOAT)
{
intrinsic = NI_SSE_CompareGreaterThanOrEqual;
}
else if (simdBaseType == TYP_DOUBLE)
{
intrinsic = NI_SSE2_CompareGreaterThanOrEqual;
}
if (intrinsic == NI_Illegal)
{
// There is no direct support for doing a combined comparison and equality for integral types.
// These have to be implemented by performing both halves and combining their results.
//
// op1Dup = op1
// op2Dup = op2
//
// op1 = GreaterThan(op1, op2)
// op2 = Equals(op1Dup, op2Dup)
//
// result = BitwiseOr(op1, op2)
GenTree* op1Dup;
op1 = impCloneExpr(op1, &op1Dup, clsHnd, (unsigned)CHECK_SPILL_ALL,
nullptr DEBUGARG("Clone op1 for vector GreaterThanOrEqual"));
GenTree* op2Dup;
op2 = impCloneExpr(op2, &op2Dup, clsHnd, (unsigned)CHECK_SPILL_ALL,
nullptr DEBUGARG("Clone op2 for vector GreaterThanOrEqual"));
op1 = gtNewSimdCmpOpNode(GT_GT, type, op1, op2, simdBaseJitType, simdSize, isSimdAsHWIntrinsic);
op2 = gtNewSimdCmpOpNode(GT_EQ, type, op1Dup, op2Dup, simdBaseJitType, simdSize, isSimdAsHWIntrinsic);
return gtNewSimdBinOpNode(GT_OR, type, op1, op2, simdBaseJitType, simdSize, isSimdAsHWIntrinsic);
}
break;
}
case GT_GT:
{
if (varTypeIsUnsigned(simdBaseType))
{
// Vector of byte, ushort, uint and ulong:
// Hardware supports > for signed comparison. Therefore, to use it for
// comparing unsigned numbers, we subtract a constant from both the
// operands such that the result fits within the corresponding signed
// type. The resulting signed numbers are compared using signed comparison.
//
// Vector of byte: constant to be subtracted is 2^7
// Vector of ushort: constant to be subtracted is 2^15
// Vector of uint: constant to be subtracted is 2^31
// Vector of ulong: constant to be subtracted is 2^63
//
// We need to treat op1 and op2 as signed for comparison purpose after
// the transformation.
GenTree* constVal = nullptr;
CorInfoType opJitType = simdBaseJitType;
var_types opType = simdBaseType;
CorInfoType constValJitType = CORINFO_TYPE_INT;
switch (simdBaseType)
{
case TYP_UBYTE:
{
constVal = gtNewIconNode(0x80808080);
simdBaseJitType = CORINFO_TYPE_BYTE;
simdBaseType = TYP_BYTE;
break;
}
case TYP_USHORT:
{
constVal = gtNewIconNode(0x80008000);
simdBaseJitType = CORINFO_TYPE_SHORT;
simdBaseType = TYP_SHORT;
break;
}
case TYP_UINT:
{
constVal = gtNewIconNode(0x80000000);
simdBaseJitType = CORINFO_TYPE_INT;
simdBaseType = TYP_INT;
break;
}
case TYP_ULONG:
{
constVal = gtNewLconNode(0x8000000000000000);
constValJitType = CORINFO_TYPE_LONG;
simdBaseJitType = CORINFO_TYPE_LONG;
simdBaseType = TYP_LONG;
break;
}
default:
{
unreached();
}
}
GenTree* constVector =
gtNewSimdCreateBroadcastNode(type, constVal, constValJitType, simdSize, isSimdAsHWIntrinsic);
GenTree* constVectorDup;
constVector = impCloneExpr(constVector, &constVectorDup, clsHnd, (unsigned)CHECK_SPILL_ALL,
nullptr DEBUGARG("Clone constVector for vector GreaterThan"));
// op1 = op1 - constVector
op1 = gtNewSimdBinOpNode(GT_SUB, type, op1, constVector, opJitType, simdSize, isSimdAsHWIntrinsic);
// op2 = op2 - constVector
op2 = gtNewSimdBinOpNode(GT_SUB, type, op2, constVectorDup, opJitType, simdSize, isSimdAsHWIntrinsic);
}
// This should have been mutated by the above path
assert(!varTypeIsUnsigned(simdBaseType));
if (simdSize == 32)
{
assert(compIsaSupportedDebugOnly(InstructionSet_AVX));
if (varTypeIsFloating(simdBaseType))
{
intrinsic = NI_AVX_CompareGreaterThan;
}
else
{
assert(compIsaSupportedDebugOnly(InstructionSet_AVX2));
intrinsic = NI_AVX2_CompareGreaterThan;
}
}
else if (simdBaseType == TYP_FLOAT)
{
intrinsic = NI_SSE_CompareGreaterThan;
}
else if (varTypeIsLong(simdBaseType))
{
if (compOpportunisticallyDependsOn(InstructionSet_SSE42))
{
intrinsic = NI_SSE42_CompareGreaterThan;
}
else
{
// There is no direct SSE2 support for comparing TYP_LONG vectors.
// These have to be implemented in terms of TYP_INT vector comparison operations.
//
// Let us consider the case of single long element comparison.
// Say op1 = (x1, y1) and op2 = (x2, y2) where x1, y1, x2, and y2 are 32-bit integers
// that comprise the longs op1 and op2.
//
// GreaterThan(op1, op2) can be expressed in terms of > relationship between 32-bit integers that
// comprise op1 and op2 as
// = (x1, y1) > (x2, y2)
// = (x1 > x2) || [(x1 == x2) && (y1 > y2)] - eq (1)
//
// op1Dup1 = op1
// op1Dup2 = op1Dup1
// op2Dup1 = op2
// op2Dup2 = op2Dup1
//
// t = (op1 > op2) - 32-bit signed comparison
// u = (op1Dup1 == op2Dup1) - 32-bit equality comparison
// v = (op1Dup2 > op2Dup2) - 32-bit unsigned comparison
//
// op1 = Shuffle(t, (3, 3, 1, 1)) - This corresponds to (x1 > x2) in eq(1) above
// v = Shuffle(v, (2, 2, 0, 0)) - This corresponds to (y1 > y2) in eq(1) above
// u = Shuffle(u, (3, 3, 1, 1)) - This corresponds to (x1 == x2) in eq(1) above
// op2 = BitwiseAnd(v, u) - This corresponds to [(x1 == x2) && (y1 > y2)] in eq(1) above
//
// result = BitwiseOr(op1, op2)
GenTree* op1Dup1;
op1 = impCloneExpr(op1, &op1Dup1, clsHnd, (unsigned)CHECK_SPILL_ALL,
nullptr DEBUGARG("Clone op1 for vector GreaterThan"));
GenTree* op1Dup2;
op1Dup1 = impCloneExpr(op1Dup1, &op1Dup2, clsHnd, (unsigned)CHECK_SPILL_ALL,
nullptr DEBUGARG("Clone op1 for vector GreaterThan"));
GenTree* op2Dup1;
op2 = impCloneExpr(op2, &op2Dup1, clsHnd, (unsigned)CHECK_SPILL_ALL,
nullptr DEBUGARG("Clone op2 for vector GreaterThan"));
GenTree* op2Dup2;
op2Dup1 = impCloneExpr(op2Dup1, &op2Dup2, clsHnd, (unsigned)CHECK_SPILL_ALL,
nullptr DEBUGARG("Clone op2 vector GreaterThan"));
GenTree* t =
gtNewSimdCmpOpNode(op, type, op1, op2, CORINFO_TYPE_INT, simdSize, isSimdAsHWIntrinsic);
GenTree* u = gtNewSimdCmpOpNode(GT_EQ, type, op1Dup1, op2Dup1, CORINFO_TYPE_INT, simdSize,
isSimdAsHWIntrinsic);
GenTree* v = gtNewSimdCmpOpNode(op, type, op1Dup2, op2Dup2, CORINFO_TYPE_UINT, simdSize,
isSimdAsHWIntrinsic);
op1 = gtNewSimdHWIntrinsicNode(type, t, gtNewIconNode(SHUFFLE_WWYY, TYP_INT), NI_SSE2_Shuffle,
CORINFO_TYPE_INT, simdSize, isSimdAsHWIntrinsic);
v = gtNewSimdHWIntrinsicNode(type, v, gtNewIconNode(SHUFFLE_ZZXX, TYP_INT), NI_SSE2_Shuffle,
CORINFO_TYPE_INT, simdSize, isSimdAsHWIntrinsic);
u = gtNewSimdHWIntrinsicNode(type, u, gtNewIconNode(SHUFFLE_WWYY, TYP_INT), NI_SSE2_Shuffle,
CORINFO_TYPE_INT, simdSize, isSimdAsHWIntrinsic);
op2 = gtNewSimdBinOpNode(GT_AND, type, v, u, simdBaseJitType, simdSize, isSimdAsHWIntrinsic);
return gtNewSimdBinOpNode(GT_OR, type, op1, op2, simdBaseJitType, simdSize, isSimdAsHWIntrinsic);
}
}
else
{
intrinsic = NI_SSE2_CompareGreaterThan;
}
break;
}
case GT_LE:
{
if (simdSize == 32)
{
assert(compIsaSupportedDebugOnly(InstructionSet_AVX));
if (varTypeIsFloating(simdBaseType))
{
intrinsic = NI_AVX_CompareLessThanOrEqual;
}
}
else if (simdBaseType == TYP_FLOAT)
{
intrinsic = NI_SSE_CompareLessThanOrEqual;
}
else if (simdBaseType == TYP_DOUBLE)
{
intrinsic = NI_SSE2_CompareLessThanOrEqual;
}
if (intrinsic == NI_Illegal)
{
// There is no direct support for doing a combined comparison and equality for integral types.
// These have to be implemented by performing both halves and combining their results.
//
// op1Dup = op1
// op2Dup = op2
//
// op1 = LessThan(op1, op2)
// op2 = Equals(op1Dup, op2Dup)
//
// result = BitwiseOr(op1, op2)
GenTree* op1Dup;
op1 = impCloneExpr(op1, &op1Dup, clsHnd, (unsigned)CHECK_SPILL_ALL,
nullptr DEBUGARG("Clone op1 for vector LessThanOrEqual"));
GenTree* op2Dup;
op2 = impCloneExpr(op2, &op2Dup, clsHnd, (unsigned)CHECK_SPILL_ALL,
nullptr DEBUGARG("Clone op2 for vector LessThanOrEqual"));
op1 = gtNewSimdCmpOpNode(GT_LT, type, op1, op2, simdBaseJitType, simdSize, isSimdAsHWIntrinsic);
op2 = gtNewSimdCmpOpNode(GT_EQ, type, op1Dup, op2Dup, simdBaseJitType, simdSize, isSimdAsHWIntrinsic);
return gtNewSimdBinOpNode(GT_OR, type, op1, op2, simdBaseJitType, simdSize, isSimdAsHWIntrinsic);
}
break;
}
case GT_LT:
{
if (varTypeIsUnsigned(simdBaseType))
{
// Vector of byte, ushort, uint and ulong:
// Hardware supports < for signed comparison. Therefore, to use it for
// comparing unsigned numbers, we subtract a constant from both the
// operands such that the result fits within the corresponding signed
// type. The resulting signed numbers are compared using signed comparison.
//
// Vector of byte: constant to be subtracted is 2^7
// Vector of ushort: constant to be subtracted is 2^15
// Vector of uint: constant to be subtracted is 2^31
// Vector of ulong: constant to be subtracted is 2^63
//
// We need to treat op1 and op2 as signed for comparison purpose after
// the transformation.
GenTree* constVal = nullptr;
CorInfoType opJitType = simdBaseJitType;
var_types opType = simdBaseType;
CorInfoType constValJitType = CORINFO_TYPE_INT;
switch (simdBaseType)
{
case TYP_UBYTE:
{
constVal = gtNewIconNode(0x80808080);
simdBaseJitType = CORINFO_TYPE_BYTE;
simdBaseType = TYP_BYTE;
break;
}
case TYP_USHORT:
{
constVal = gtNewIconNode(0x80008000);
simdBaseJitType = CORINFO_TYPE_SHORT;
simdBaseType = TYP_SHORT;
break;
}
case TYP_UINT:
{
constVal = gtNewIconNode(0x80000000);
simdBaseJitType = CORINFO_TYPE_INT;
simdBaseType = TYP_INT;
break;
}
case TYP_ULONG:
{
constVal = gtNewLconNode(0x8000000000000000);
constValJitType = CORINFO_TYPE_LONG;
simdBaseJitType = CORINFO_TYPE_LONG;
simdBaseType = TYP_LONG;
break;
}
default:
{
unreached();
}
}
GenTree* constVector =
gtNewSimdCreateBroadcastNode(type, constVal, constValJitType, simdSize, isSimdAsHWIntrinsic);
GenTree* constVectorDup;
constVector = impCloneExpr(constVector, &constVectorDup, clsHnd, (unsigned)CHECK_SPILL_ALL,
nullptr DEBUGARG("Clone constVector for vector LessThan"));
// op1 = op1 - constVector
op1 = gtNewSimdBinOpNode(GT_SUB, type, op1, constVector, opJitType, simdSize, isSimdAsHWIntrinsic);
// op2 = op2 - constVector
op2 = gtNewSimdBinOpNode(GT_SUB, type, op2, constVectorDup, opJitType, simdSize, isSimdAsHWIntrinsic);
}
// This should have been mutated by the above path
assert(!varTypeIsUnsigned(simdBaseType));
if (simdSize == 32)
{
assert(compIsaSupportedDebugOnly(InstructionSet_AVX));
if (varTypeIsFloating(simdBaseType))
{
intrinsic = NI_AVX_CompareLessThan;
}
else
{
assert(compIsaSupportedDebugOnly(InstructionSet_AVX2));
intrinsic = NI_AVX2_CompareLessThan;
}
}
else if (simdBaseType == TYP_FLOAT)
{
intrinsic = NI_SSE_CompareLessThan;
}
else if (varTypeIsLong(simdBaseType))
{
if (compOpportunisticallyDependsOn(InstructionSet_SSE42))
{
intrinsic = NI_SSE42_CompareLessThan;
}
else
{
// There is no direct SSE2 support for comparing TYP_LONG vectors.
// These have to be implemented in terms of TYP_INT vector comparison operations.
//
// Let us consider the case of single long element comparison.
// Say op1 = (x1, y1) and op2 = (x2, y2) where x1, y1, x2, and y2 are 32-bit integers
// that comprise the longs op1 and op2.
//
// LessThan(op1, op2) can be expressed in terms of the < relationship between the 32-bit
// integers that comprise op1 and op2 as
// = (x1, y1) < (x2, y2)
// = (x1 < x2) || [(x1 == x2) && (y1 < y2)] - eq (1)
//
// op1Dup1 = op1
// op1Dup2 = op1Dup1
// op2Dup1 = op2
// op2Dup2 = op2Dup1
//
// t = (op1 < op2) - 32-bit signed comparison
// u = (op1Dup1 == op2Dup1) - 32-bit equality comparison
// v = (op1Dup2 < op2Dup2) - 32-bit unsigned comparison
//
// op1 = Shuffle(t, (3, 3, 1, 1)) - This corresponds to (x1 < x2) in eq(1) above
// v = Shuffle(v, (2, 2, 0, 0)) - This corresponds to (y1 < y2) in eq(1) above
// u = Shuffle(u, (3, 3, 1, 1)) - This corresponds to (x1 == x2) in eq(1) above
// op2 = BitwiseAnd(v, u) - This corresponds to [(x1 == x2) && (y1 < y2)] in eq(1) above
//
// result = BitwiseOr(op1, op2)
GenTree* op1Dup1;
op1 = impCloneExpr(op1, &op1Dup1, clsHnd, (unsigned)CHECK_SPILL_ALL,
nullptr DEBUGARG("Clone op1 for vector LessThan"));
GenTree* op1Dup2;
op1Dup1 = impCloneExpr(op1Dup1, &op1Dup2, clsHnd, (unsigned)CHECK_SPILL_ALL,
nullptr DEBUGARG("Clone op1 for vector LessThan"));
GenTree* op2Dup1;
op2 = impCloneExpr(op2, &op2Dup1, clsHnd, (unsigned)CHECK_SPILL_ALL,
nullptr DEBUGARG("Clone op2 for vector LessThan"));
GenTree* op2Dup2;
op2Dup1 = impCloneExpr(op2Dup1, &op2Dup2, clsHnd, (unsigned)CHECK_SPILL_ALL,
nullptr DEBUGARG("Clone op2 vector LessThan"));
GenTree* t =
gtNewSimdCmpOpNode(op, type, op1, op2, CORINFO_TYPE_INT, simdSize, isSimdAsHWIntrinsic);
GenTree* u = gtNewSimdCmpOpNode(GT_EQ, type, op1Dup1, op2Dup1, CORINFO_TYPE_INT, simdSize,
isSimdAsHWIntrinsic);
GenTree* v = gtNewSimdCmpOpNode(op, type, op1Dup2, op2Dup2, CORINFO_TYPE_UINT, simdSize,
isSimdAsHWIntrinsic);
op1 = gtNewSimdHWIntrinsicNode(type, t, gtNewIconNode(SHUFFLE_WWYY, TYP_INT), NI_SSE2_Shuffle,
CORINFO_TYPE_INT, simdSize, isSimdAsHWIntrinsic);
v = gtNewSimdHWIntrinsicNode(type, v, gtNewIconNode(SHUFFLE_ZZXX, TYP_INT), NI_SSE2_Shuffle,
CORINFO_TYPE_INT, simdSize, isSimdAsHWIntrinsic);
u = gtNewSimdHWIntrinsicNode(type, u, gtNewIconNode(SHUFFLE_WWYY, TYP_INT), NI_SSE2_Shuffle,
CORINFO_TYPE_INT, simdSize, isSimdAsHWIntrinsic);
op2 = gtNewSimdBinOpNode(GT_AND, type, v, u, simdBaseJitType, simdSize, isSimdAsHWIntrinsic);
return gtNewSimdBinOpNode(GT_OR, type, op1, op2, simdBaseJitType, simdSize, isSimdAsHWIntrinsic);
}
}
else
{
intrinsic = NI_SSE2_CompareLessThan;
}
break;
}
#elif defined(TARGET_ARM64)
case GT_EQ:
{
if ((varTypeIsLong(simdBaseType) || (simdBaseType == TYP_DOUBLE)))
{
intrinsic = (simdSize == 8) ? NI_AdvSimd_Arm64_CompareEqualScalar : NI_AdvSimd_Arm64_CompareEqual;
}
else
{
intrinsic = NI_AdvSimd_CompareEqual;
}
break;
}
case GT_GE:
{
if ((varTypeIsLong(simdBaseType) || (simdBaseType == TYP_DOUBLE)))
{
intrinsic = (simdSize == 8) ? NI_AdvSimd_Arm64_CompareGreaterThanOrEqualScalar
: NI_AdvSimd_Arm64_CompareGreaterThanOrEqual;
}
else
{
intrinsic = NI_AdvSimd_CompareGreaterThanOrEqual;
}
break;
}
case GT_GT:
{
if ((varTypeIsLong(simdBaseType) || (simdBaseType == TYP_DOUBLE)))
{
intrinsic =
(simdSize == 8) ? NI_AdvSimd_Arm64_CompareGreaterThanScalar : NI_AdvSimd_Arm64_CompareGreaterThan;
}
else
{
intrinsic = NI_AdvSimd_CompareGreaterThan;
}
break;
}
case GT_LE:
{
if ((varTypeIsLong(simdBaseType) || (simdBaseType == TYP_DOUBLE)))
{
intrinsic = (simdSize == 8) ? NI_AdvSimd_Arm64_CompareLessThanOrEqualScalar
: NI_AdvSimd_Arm64_CompareLessThanOrEqual;
}
else
{
intrinsic = NI_AdvSimd_CompareLessThanOrEqual;
}
break;
}
case GT_LT:
{
if ((varTypeIsLong(simdBaseType) || (simdBaseType == TYP_DOUBLE)))
{
intrinsic = (simdSize == 8) ? NI_AdvSimd_Arm64_CompareLessThanScalar : NI_AdvSimd_Arm64_CompareLessThan;
}
else
{
intrinsic = NI_AdvSimd_CompareLessThan;
}
break;
}
#else
#error Unsupported platform
#endif // !TARGET_XARCH && !TARGET_ARM64
default:
{
unreached();
}
}
assert(intrinsic != NI_Illegal);
return gtNewSimdHWIntrinsicNode(type, op1, op2, intrinsic, simdBaseJitType, simdSize, isSimdAsHWIntrinsic);
}
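// gtNewSimdCmpOpAllNode: Creates a TYP_BOOL node that is true only when the comparison
// `op` holds for every element of the two vectors.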
GenTree* Compiler::gtNewSimdCmpOpAllNode(genTreeOps op,
var_types type,
GenTree* op1,
GenTree* op2,
CorInfoType simdBaseJitType,
unsigned simdSize,
bool isSimdAsHWIntrinsic)
{
assert(IsBaselineSimdIsaSupportedDebugOnly());
assert(type == TYP_BOOL);
var_types simdType = getSIMDTypeForSize(simdSize);
assert(varTypeIsSIMD(simdType));
assert(op1 != nullptr);
assert(op1->TypeIs(simdType));
assert(op2 != nullptr);
assert(op2->TypeIs(simdType));
var_types simdBaseType = JitType2PreciseVarType(simdBaseJitType);
assert(varTypeIsArithmetic(simdBaseType));
NamedIntrinsic intrinsic = NI_Illegal;
switch (op)
{
#if defined(TARGET_XARCH)
case GT_EQ:
{
if (simdSize == 32)
{
assert(compIsaSupportedDebugOnly(InstructionSet_AVX));
assert(varTypeIsFloating(simdBaseType) || compIsaSupportedDebugOnly(InstructionSet_AVX2));
intrinsic = NI_Vector256_op_Equality;
}
else
{
intrinsic = NI_Vector128_op_Equality;
}
break;
}
case GT_GE:
case GT_GT:
case GT_LE:
case GT_LT:
{
// We want to generate a comparison along the lines of
// GT_XX(op1, op2).As<T, TInteger>() == Vector128<TInteger>.AllBitsSet
NamedIntrinsic getAllBitsSet = NI_Illegal;
if (simdSize == 32)
{
// TODO-XArch-CQ: It's a non-trivial amount of work to support these
// for floating-point while only utilizing AVX. It would require, among
// other things, inverting the comparison and potentially support for a
// new Avx.TestNotZ intrinsic to ensure the codegen remains efficient.
assert(compIsaSupportedDebugOnly(InstructionSet_AVX2));
intrinsic = NI_Vector256_op_Equality;
getAllBitsSet = NI_Vector256_get_AllBitsSet;
}
else
{
intrinsic = NI_Vector128_op_Equality;
getAllBitsSet = NI_Vector128_get_AllBitsSet;
}
op1 = gtNewSimdCmpOpNode(op, simdType, op1, op2, simdBaseJitType, simdSize,
/* isSimdAsHWIntrinsic */ false);
if (simdBaseType == TYP_FLOAT)
{
simdBaseType = TYP_INT;
simdBaseJitType = CORINFO_TYPE_INT;
}
else if (simdBaseType == TYP_DOUBLE)
{
simdBaseType = TYP_LONG;
simdBaseJitType = CORINFO_TYPE_LONG;
}
op2 = gtNewSimdHWIntrinsicNode(simdType, getAllBitsSet, simdBaseJitType, simdSize);
break;
}
#elif defined(TARGET_ARM64)
case GT_EQ:
{
intrinsic = (simdSize == 8) ? NI_Vector64_op_Equality : NI_Vector128_op_Equality;
break;
}
case GT_GE:
case GT_GT:
case GT_LE:
case GT_LT:
{
// We want to generate a comparison along the lines of
// GT_XX(op1, op2).As<T, TInteger>() == Vector128<TInteger>.AllBitsSet
NamedIntrinsic getAllBitsSet = NI_Illegal;
if (simdSize == 8)
{
intrinsic = NI_Vector64_op_Equality;
getAllBitsSet = NI_Vector64_get_AllBitsSet;
}
else
{
intrinsic = NI_Vector128_op_Equality;
getAllBitsSet = NI_Vector128_get_AllBitsSet;
}
op1 = gtNewSimdCmpOpNode(op, simdType, op1, op2, simdBaseJitType, simdSize,
/* isSimdAsHWIntrinsic */ false);
if (simdBaseType == TYP_FLOAT)
{
simdBaseType = TYP_INT;
simdBaseJitType = CORINFO_TYPE_INT;
}
else if (simdBaseType == TYP_DOUBLE)
{
simdBaseType = TYP_LONG;
simdBaseJitType = CORINFO_TYPE_LONG;
}
op2 = gtNewSimdHWIntrinsicNode(simdType, getAllBitsSet, simdBaseJitType, simdSize);
break;
}
#else
#error Unsupported platform
#endif // !TARGET_XARCH && !TARGET_ARM64
default:
{
unreached();
}
}
assert(intrinsic != NI_Illegal);
return gtNewSimdHWIntrinsicNode(type, op1, op2, intrinsic, simdBaseJitType, simdSize, isSimdAsHWIntrinsic);
}
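// gtNewSimdCmpOpAnyNode: Creates a TYP_BOOL node that is true when the comparison `op`
// holds for at least one element of the two vectors.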
GenTree* Compiler::gtNewSimdCmpOpAnyNode(genTreeOps op,
var_types type,
GenTree* op1,
GenTree* op2,
CorInfoType simdBaseJitType,
unsigned simdSize,
bool isSimdAsHWIntrinsic)
{
assert(IsBaselineSimdIsaSupportedDebugOnly());
assert(type == TYP_BOOL);
var_types simdType = getSIMDTypeForSize(simdSize);
assert(varTypeIsSIMD(simdType));
assert(op1 != nullptr);
assert(op1->TypeIs(simdType));
assert(op2 != nullptr);
assert(op2->TypeIs(simdType));
var_types simdBaseType = JitType2PreciseVarType(simdBaseJitType);
assert(varTypeIsArithmetic(simdBaseType));
NamedIntrinsic intrinsic = NI_Illegal;
switch (op)
{
#if defined(TARGET_XARCH)
case GT_EQ:
case GT_GE:
case GT_GT:
case GT_LE:
case GT_LT:
{
// We want to generate a comparison along the lines of
// GT_XX(op1, op2).As<T, TInteger>() != Vector128<TInteger>.Zero
if (simdSize == 32)
{
// TODO-XArch-CQ: It's a non-trivial amount of work to support these
// for floating-point while only utilizing AVX. It would require, among
// other things, inverting the comparison and potentially support for a
// new Avx.TestNotZ intrinsic to ensure the codegen remains efficient.
assert(compIsaSupportedDebugOnly(InstructionSet_AVX2));
intrinsic = NI_Vector256_op_Inequality;
}
else
{
intrinsic = NI_Vector128_op_Inequality;
}
op1 = gtNewSimdCmpOpNode(op, simdType, op1, op2, simdBaseJitType, simdSize,
/* isSimdAsHWIntrinsic */ false);
if (simdBaseType == TYP_FLOAT)
{
simdBaseType = TYP_INT;
simdBaseJitType = CORINFO_TYPE_INT;
}
else if (simdBaseType == TYP_DOUBLE)
{
simdBaseType = TYP_LONG;
simdBaseJitType = CORINFO_TYPE_LONG;
}
op2 = gtNewSimdZeroNode(simdType, simdBaseJitType, simdSize, /* isSimdAsHWIntrinsic */ false);
break;
}
case GT_NE:
{
if (simdSize == 32)
{
assert(compIsaSupportedDebugOnly(InstructionSet_AVX));
assert(varTypeIsFloating(simdBaseType) || compIsaSupportedDebugOnly(InstructionSet_AVX2));
intrinsic = NI_Vector256_op_Inequality;
}
else
{
intrinsic = NI_Vector128_op_Inequality;
}
break;
}
#elif defined(TARGET_ARM64)
case GT_EQ:
case GT_GE:
case GT_GT:
case GT_LE:
case GT_LT:
{
// We want to generate a comparison along the lines of
// GT_XX(op1, op2).As<T, TInteger>() != Vector128<TInteger>.Zero
intrinsic = (simdSize == 8) ? NI_Vector64_op_Inequality : NI_Vector128_op_Inequality;
op1 = gtNewSimdCmpOpNode(op, simdType, op1, op2, simdBaseJitType, simdSize,
/* isSimdAsHWIntrinsic */ false);
if (simdBaseType == TYP_FLOAT)
{
simdBaseType = TYP_INT;
simdBaseJitType = CORINFO_TYPE_INT;
}
else if (simdBaseType == TYP_DOUBLE)
{
simdBaseType = TYP_LONG;
simdBaseJitType = CORINFO_TYPE_LONG;
}
op2 = gtNewSimdZeroNode(simdType, simdBaseJitType, simdSize, /* isSimdAsHWIntrinsic */ false);
break;
}
case GT_NE:
{
intrinsic = (simdSize == 8) ? NI_Vector64_op_Inequality : NI_Vector128_op_Inequality;
break;
}
#else
#error Unsupported platform
#endif // !TARGET_XARCH && !TARGET_ARM64
default:
{
unreached();
}
}
assert(intrinsic != NI_Illegal);
return gtNewSimdHWIntrinsicNode(type, op1, op2, intrinsic, simdBaseJitType, simdSize, isSimdAsHWIntrinsic);
}
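// gtNewSimdCndSelNode: Creates a per-element conditional select, i.e.
// (op1 & op2) | (~op1 & op3), selecting from op2 where the mask op1 is set and from op3
// elsewhere.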
GenTree* Compiler::gtNewSimdCndSelNode(var_types type,
GenTree* op1,
GenTree* op2,
GenTree* op3,
CorInfoType simdBaseJitType,
unsigned simdSize,
bool isSimdAsHWIntrinsic)
{
assert(IsBaselineSimdIsaSupportedDebugOnly());
assert(varTypeIsSIMD(type));
assert(getSIMDTypeForSize(simdSize) == type);
assert(op1 != nullptr);
assert(op1->TypeIs(type));
assert(op2 != nullptr);
assert(op2->TypeIs(type));
assert(op3 != nullptr);
assert(op3->TypeIs(type));
var_types simdBaseType = JitType2PreciseVarType(simdBaseJitType);
assert(varTypeIsArithmetic(simdBaseType));
NamedIntrinsic intrinsic = NI_Illegal;
#if defined(TARGET_XARCH)
// TODO-XARCH-CQ: It's likely beneficial to have a dedicated CndSel node so we
// can special case when the condition is the result of various compare operations.
//
// When it is, the condition is AllBitsSet or Zero on a per-element basis and we
// could change this to be a Blend operation in lowering as an optimization.
assert((simdSize != 32) || compIsaSupportedDebugOnly(InstructionSet_AVX));
CORINFO_CLASS_HANDLE clsHnd = gtGetStructHandleForSIMD(type, simdBaseJitType);
GenTree* op1Dup;
op1 = impCloneExpr(op1, &op1Dup, clsHnd, (unsigned)CHECK_SPILL_ALL,
nullptr DEBUGARG("Clone op1 for vector conditional select"));
// op2 = op2 & op1
op2 = gtNewSimdBinOpNode(GT_AND, type, op2, op1, simdBaseJitType, simdSize, isSimdAsHWIntrinsic);
// op3 = op3 & ~op1Dup
op3 = gtNewSimdBinOpNode(GT_AND_NOT, type, op3, op1Dup, simdBaseJitType, simdSize, isSimdAsHWIntrinsic);
// result = op2 | op3
return gtNewSimdBinOpNode(GT_OR, type, op2, op3, simdBaseJitType, simdSize, isSimdAsHWIntrinsic);
#elif defined(TARGET_ARM64)
return gtNewSimdHWIntrinsicNode(type, op1, op2, op3, NI_AdvSimd_BitwiseSelect, simdBaseJitType, simdSize,
isSimdAsHWIntrinsic);
#else
#error Unsupported platform
#endif // !TARGET_XARCH && !TARGET_ARM64
}
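// gtNewSimdCreateBroadcastNode: Creates a node that broadcasts the scalar op1 to every
// element of the resulting vector.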
GenTree* Compiler::gtNewSimdCreateBroadcastNode(
var_types type, GenTree* op1, CorInfoType simdBaseJitType, unsigned simdSize, bool isSimdAsHWIntrinsic)
{
NamedIntrinsic hwIntrinsicID = NI_Vector128_Create;
var_types simdBaseType = JitType2PreciseVarType(simdBaseJitType);
#if defined(TARGET_XARCH)
#if defined(TARGET_X86)
if (varTypeIsLong(simdBaseType) && !op1->IsIntegralConst())
{
// TODO-XARCH-CQ: It may be beneficial to emit the movq
// instruction, which takes a 64-bit memory address and
// works on 32-bit x86 systems.
unreached();
}
#endif // TARGET_X86
if (simdSize == 32)
{
hwIntrinsicID = NI_Vector256_Create;
}
#elif defined(TARGET_ARM64)
if (simdSize == 8)
{
hwIntrinsicID = NI_Vector64_Create;
}
#else
#error Unsupported platform
#endif // !TARGET_XARCH && !TARGET_ARM64
return gtNewSimdHWIntrinsicNode(type, op1, hwIntrinsicID, simdBaseJitType, simdSize, isSimdAsHWIntrinsic);
}
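// gtNewSimdDotProdNode: Creates a node that computes the dot product of two vectors,
// producing a scalar of the base element type.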
GenTree* Compiler::gtNewSimdDotProdNode(var_types type,
GenTree* op1,
GenTree* op2,
CorInfoType simdBaseJitType,
unsigned simdSize,
bool isSimdAsHWIntrinsic)
{
assert(IsBaselineSimdIsaSupportedDebugOnly());
assert(varTypeIsArithmetic(type));
var_types simdType = getSIMDTypeForSize(simdSize);
assert(varTypeIsSIMD(simdType));
assert(op1 != nullptr);
assert(op1->TypeIs(simdType));
assert(op2 != nullptr);
assert(op2->TypeIs(simdType));
var_types simdBaseType = JitType2PreciseVarType(simdBaseJitType);
assert(JITtype2varType(simdBaseJitType) == type);
NamedIntrinsic intrinsic = NI_Illegal;
#if defined(TARGET_XARCH)
assert(!varTypeIsByte(simdBaseType) && !varTypeIsLong(simdBaseType));
if (simdSize == 32)
{
assert(varTypeIsFloating(simdBaseType) || compIsaSupportedDebugOnly(InstructionSet_AVX2));
intrinsic = NI_Vector256_Dot;
}
else
{
assert(((simdBaseType != TYP_INT) && (simdBaseType != TYP_UINT)) ||
compIsaSupportedDebugOnly(InstructionSet_SSE41));
intrinsic = NI_Vector128_Dot;
}
#elif defined(TARGET_ARM64)
assert(!varTypeIsLong(simdBaseType));
intrinsic = (simdSize == 8) ? NI_Vector64_Dot : NI_Vector128_Dot;
#else
#error Unsupported platform
#endif // !TARGET_XARCH && !TARGET_ARM64
assert(intrinsic != NI_Illegal);
return gtNewSimdHWIntrinsicNode(type, op1, op2, intrinsic, simdBaseJitType, simdSize, isSimdAsHWIntrinsic);
}
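// gtNewSimdFloorNode: Creates a node that computes the per-element floor (round toward
// negative infinity) of a floating-point vector.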
GenTree* Compiler::gtNewSimdFloorNode(
var_types type, GenTree* op1, CorInfoType simdBaseJitType, unsigned simdSize, bool isSimdAsHWIntrinsic)
{
assert(IsBaselineSimdIsaSupportedDebugOnly());
assert(varTypeIsSIMD(type));
assert(getSIMDTypeForSize(simdSize) == type);
assert(op1 != nullptr);
assert(op1->TypeIs(type));
var_types simdBaseType = JitType2PreciseVarType(simdBaseJitType);
assert(varTypeIsFloating(simdBaseType));
NamedIntrinsic intrinsic = NI_Illegal;
#if defined(TARGET_XARCH)
if (simdSize == 32)
{
intrinsic = NI_AVX_Floor;
}
else
{
assert(compIsaSupportedDebugOnly(InstructionSet_SSE41));
intrinsic = NI_SSE41_Floor;
}
#elif defined(TARGET_ARM64)
if (simdBaseType == TYP_DOUBLE)
{
intrinsic = (simdSize == 8) ? NI_AdvSimd_FloorScalar : NI_AdvSimd_Arm64_Floor;
}
else
{
intrinsic = NI_AdvSimd_Floor;
}
#else
#error Unsupported platform
#endif // !TARGET_XARCH && !TARGET_ARM64
assert(intrinsic != NI_Illegal);
return gtNewSimdHWIntrinsicNode(type, op1, intrinsic, simdBaseJitType, simdSize, isSimdAsHWIntrinsic);
}
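// gtNewSimdGetElementNode: Creates a node that extracts the element at index op2 from
// vector op1, adding a bounds check when the index is not a known in-range constant.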
GenTree* Compiler::gtNewSimdGetElementNode(var_types type,
GenTree* op1,
GenTree* op2,
CorInfoType simdBaseJitType,
unsigned simdSize,
bool isSimdAsHWIntrinsic)
{
NamedIntrinsic intrinsicId = NI_Vector128_GetElement;
var_types simdBaseType = JitType2PreciseVarType(simdBaseJitType);
assert(varTypeIsArithmetic(simdBaseType));
#if defined(TARGET_XARCH)
switch (simdBaseType)
{
// Using software fallback if simdBaseType is not supported by hardware
case TYP_BYTE:
case TYP_UBYTE:
case TYP_INT:
case TYP_UINT:
case TYP_LONG:
case TYP_ULONG:
assert(compIsaSupportedDebugOnly(InstructionSet_SSE41));
break;
case TYP_DOUBLE:
case TYP_FLOAT:
case TYP_SHORT:
case TYP_USHORT:
assert(compIsaSupportedDebugOnly(InstructionSet_SSE2));
break;
default:
unreached();
}
if (simdSize == 32)
{
intrinsicId = NI_Vector256_GetElement;
}
#elif defined(TARGET_ARM64)
if (simdSize == 8)
{
intrinsicId = NI_Vector64_GetElement;
}
#else
#error Unsupported platform
#endif // !TARGET_XARCH && !TARGET_ARM64
int immUpperBound = getSIMDVectorLength(simdSize, simdBaseType) - 1;
bool rangeCheckNeeded = !op2->OperIsConst();
if (!rangeCheckNeeded)
{
ssize_t imm8 = op2->AsIntCon()->IconValue();
rangeCheckNeeded = (imm8 < 0) || (imm8 > immUpperBound);
}
if (rangeCheckNeeded)
{
op2 = addRangeCheckForHWIntrinsic(op2, 0, immUpperBound);
}
return gtNewSimdHWIntrinsicNode(type, op1, op2, intrinsicId, simdBaseJitType, simdSize, isSimdAsHWIntrinsic);
}
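// gtNewSimdMaxNode: Creates a node that computes the per-element maximum of two vectors,
// falling back to a compare-and-select sequence when no direct instruction is available.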
GenTree* Compiler::gtNewSimdMaxNode(var_types type,
GenTree* op1,
GenTree* op2,
CorInfoType simdBaseJitType,
unsigned simdSize,
bool isSimdAsHWIntrinsic)
{
assert(IsBaselineSimdIsaSupportedDebugOnly());
assert(varTypeIsSIMD(type));
assert(getSIMDTypeForSize(simdSize) == type);
assert(op1 != nullptr);
assert(op1->TypeIs(type));
assert(op2 != nullptr);
assert(op2->TypeIs(type));
var_types simdBaseType = JitType2PreciseVarType(simdBaseJitType);
assert(varTypeIsArithmetic(simdBaseType));
NamedIntrinsic intrinsic = NI_Illegal;
CORINFO_CLASS_HANDLE clsHnd = gtGetStructHandleForSIMD(type, simdBaseJitType);
#if defined(TARGET_XARCH)
if (simdSize == 32)
{
assert(compIsaSupportedDebugOnly(InstructionSet_AVX));
if (varTypeIsFloating(simdBaseType))
{
intrinsic = NI_AVX_Max;
}
else
{
assert(compIsaSupportedDebugOnly(InstructionSet_AVX2));
if (!varTypeIsLong(simdBaseType))
{
intrinsic = NI_AVX2_Max;
}
}
}
else
{
switch (simdBaseType)
{
case TYP_BYTE:
case TYP_USHORT:
{
GenTree* constVal = nullptr;
CorInfoType opJitType = simdBaseJitType;
var_types opType = simdBaseType;
genTreeOps fixupOp1 = GT_NONE;
genTreeOps fixupOp2 = GT_NONE;
switch (simdBaseType)
{
case TYP_BYTE:
{
constVal = gtNewIconNode(0x80808080);
fixupOp1 = GT_SUB;
fixupOp2 = GT_ADD;
simdBaseJitType = CORINFO_TYPE_UBYTE;
simdBaseType = TYP_UBYTE;
break;
}
case TYP_USHORT:
{
constVal = gtNewIconNode(0x80008000);
fixupOp1 = GT_ADD;
fixupOp2 = GT_SUB;
simdBaseJitType = CORINFO_TYPE_SHORT;
simdBaseType = TYP_SHORT;
break;
}
default:
{
unreached();
}
}
assert(constVal != nullptr);
assert(fixupOp1 != GT_NONE);
assert(fixupOp2 != GT_NONE);
assert(opJitType != simdBaseJitType);
assert(opType != simdBaseType);
GenTree* constVector =
gtNewSimdCreateBroadcastNode(type, constVal, CORINFO_TYPE_INT, simdSize, isSimdAsHWIntrinsic);
GenTree* constVectorDup1;
constVector = impCloneExpr(constVector, &constVectorDup1, clsHnd, (unsigned)CHECK_SPILL_ALL,
nullptr DEBUGARG("Clone constVector for vector Max"));
GenTree* constVectorDup2;
constVectorDup1 = impCloneExpr(constVectorDup1, &constVectorDup2, clsHnd, (unsigned)CHECK_SPILL_ALL,
nullptr DEBUGARG("Clone constVector for vector Max"));
// op1 = op1 - constVector
// -or-
// op1 = op1 + constVector
op1 = gtNewSimdBinOpNode(fixupOp1, type, op1, constVector, opJitType, simdSize, isSimdAsHWIntrinsic);
// op2 = op2 - constVectorDup1
// -or-
// op2 = op2 + constVectorDup1
op2 =
gtNewSimdBinOpNode(fixupOp1, type, op2, constVectorDup1, opJitType, simdSize, isSimdAsHWIntrinsic);
// op1 = Max(op1, op2)
op1 = gtNewSimdMaxNode(type, op1, op2, simdBaseJitType, simdSize, isSimdAsHWIntrinsic);
// result = op1 + constVectorDup2
// -or-
// result = op1 - constVectorDup2
return gtNewSimdBinOpNode(fixupOp2, type, op1, constVectorDup2, opJitType, simdSize,
isSimdAsHWIntrinsic);
}
case TYP_INT:
case TYP_UINT:
case TYP_LONG:
case TYP_ULONG:
{
break;
}
case TYP_FLOAT:
{
intrinsic = NI_SSE_Max;
break;
}
case TYP_UBYTE:
case TYP_SHORT:
case TYP_DOUBLE:
{
intrinsic = NI_SSE2_Max;
break;
}
default:
{
unreached();
}
}
}
#elif defined(TARGET_ARM64)
if (!varTypeIsLong(simdBaseType))
{
if (simdBaseType == TYP_DOUBLE)
{
intrinsic = (simdSize == 8) ? NI_AdvSimd_Arm64_MaxScalar : NI_AdvSimd_Arm64_Max;
}
else
{
intrinsic = NI_AdvSimd_Max;
}
}
#else
#error Unsupported platform
#endif // !TARGET_XARCH && !TARGET_ARM64
if (intrinsic != NI_Illegal)
{
return gtNewSimdHWIntrinsicNode(type, op1, op2, intrinsic, simdBaseJitType, simdSize, isSimdAsHWIntrinsic);
}
GenTree* op1Dup;
op1 = impCloneExpr(op1, &op1Dup, clsHnd, (unsigned)CHECK_SPILL_ALL, nullptr DEBUGARG("Clone op1 for vector Max"));
GenTree* op2Dup;
op2 = impCloneExpr(op2, &op2Dup, clsHnd, (unsigned)CHECK_SPILL_ALL, nullptr DEBUGARG("Clone op2 for vector Max"));
// op1 = op1 > op2
op1 = gtNewSimdCmpOpNode(GT_GT, type, op1, op2, simdBaseJitType, simdSize, isSimdAsHWIntrinsic);
// result = ConditionalSelect(op1, op1Dup, op2Dup)
return gtNewSimdCndSelNode(type, op1, op1Dup, op2Dup, simdBaseJitType, simdSize, isSimdAsHWIntrinsic);
}
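// gtNewSimdMinNode: Creates a node that computes the per-element minimum of two vectors,
// falling back to a compare-and-select sequence when no direct instruction is available.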
GenTree* Compiler::gtNewSimdMinNode(var_types type,
GenTree* op1,
GenTree* op2,
CorInfoType simdBaseJitType,
unsigned simdSize,
bool isSimdAsHWIntrinsic)
{
assert(IsBaselineSimdIsaSupportedDebugOnly());
assert(varTypeIsSIMD(type));
assert(getSIMDTypeForSize(simdSize) == type);
assert(op1 != nullptr);
assert(op1->TypeIs(type));
assert(op2 != nullptr);
assert(op2->TypeIs(type));
var_types simdBaseType = JitType2PreciseVarType(simdBaseJitType);
assert(varTypeIsArithmetic(simdBaseType));
NamedIntrinsic intrinsic = NI_Illegal;
CORINFO_CLASS_HANDLE clsHnd = gtGetStructHandleForSIMD(type, simdBaseJitType);
#if defined(TARGET_XARCH)
if (simdSize == 32)
{
assert(compIsaSupportedDebugOnly(InstructionSet_AVX));
if (varTypeIsFloating(simdBaseType))
{
intrinsic = NI_AVX_Min;
}
else
{
assert(compIsaSupportedDebugOnly(InstructionSet_AVX2));
if (!varTypeIsLong(simdBaseType))
{
intrinsic = NI_AVX2_Min;
}
}
}
else
{
switch (simdBaseType)
{
case TYP_BYTE:
case TYP_USHORT:
{
GenTree* constVal = nullptr;
CorInfoType opJitType = simdBaseJitType;
var_types opType = simdBaseType;
genTreeOps fixupOp1 = GT_NONE;
genTreeOps fixupOp2 = GT_NONE;
switch (simdBaseType)
{
case TYP_BYTE:
{
constVal = gtNewIconNode(0x80808080);
fixupOp1 = GT_SUB;
fixupOp2 = GT_ADD;
simdBaseJitType = CORINFO_TYPE_UBYTE;
simdBaseType = TYP_UBYTE;
break;
}
case TYP_USHORT:
{
constVal = gtNewIconNode(0x80008000);
fixupOp1 = GT_ADD;
fixupOp2 = GT_SUB;
simdBaseJitType = CORINFO_TYPE_SHORT;
simdBaseType = TYP_SHORT;
break;
}
default:
{
unreached();
}
}
assert(constVal != nullptr);
assert(fixupOp1 != GT_NONE);
assert(fixupOp2 != GT_NONE);
assert(opJitType != simdBaseJitType);
assert(opType != simdBaseType);
GenTree* constVector =
gtNewSimdCreateBroadcastNode(type, constVal, CORINFO_TYPE_INT, simdSize, isSimdAsHWIntrinsic);
GenTree* constVectorDup1;
constVector = impCloneExpr(constVector, &constVectorDup1, clsHnd, (unsigned)CHECK_SPILL_ALL,
nullptr DEBUGARG("Clone constVector for vector Min"));
GenTree* constVectorDup2;
constVectorDup1 = impCloneExpr(constVectorDup1, &constVectorDup2, clsHnd, (unsigned)CHECK_SPILL_ALL,
nullptr DEBUGARG("Clone constVector for vector Min"));
// op1 = op1 - constVector
// -or-
// op1 = op1 + constVector
op1 = gtNewSimdBinOpNode(fixupOp1, type, op1, constVector, opJitType, simdSize, isSimdAsHWIntrinsic);
// op2 = op2 - constVectorDup1
// -or-
// op2 = op2 + constVectorDup1
op2 =
gtNewSimdBinOpNode(fixupOp1, type, op2, constVectorDup1, opJitType, simdSize, isSimdAsHWIntrinsic);
// op1 = Min(op1, op2)
op1 = gtNewSimdMinNode(type, op1, op2, simdBaseJitType, simdSize, isSimdAsHWIntrinsic);
// result = op1 + constVectorDup2
// -or-
// result = op1 - constVectorDup2
return gtNewSimdBinOpNode(fixupOp2, type, op1, constVectorDup2, opJitType, simdSize,
isSimdAsHWIntrinsic);
}
case TYP_INT:
case TYP_UINT:
case TYP_LONG:
case TYP_ULONG:
{
break;
}
case TYP_FLOAT:
{
intrinsic = NI_SSE_Min;
break;
}
case TYP_UBYTE:
case TYP_SHORT:
case TYP_DOUBLE:
{
intrinsic = NI_SSE2_Min;
break;
}
default:
{
unreached();
}
}
}
#elif defined(TARGET_ARM64)
if (!varTypeIsLong(simdBaseType))
{
if (simdBaseType == TYP_DOUBLE)
{
intrinsic = (simdSize == 8) ? NI_AdvSimd_Arm64_MinScalar : NI_AdvSimd_Arm64_Min;
}
else
{
intrinsic = NI_AdvSimd_Min;
}
}
#else
#error Unsupported platform
#endif // !TARGET_XARCH && !TARGET_ARM64
if (intrinsic != NI_Illegal)
{
return gtNewSimdHWIntrinsicNode(type, op1, op2, intrinsic, simdBaseJitType, simdSize, isSimdAsHWIntrinsic);
}
GenTree* op1Dup;
op1 = impCloneExpr(op1, &op1Dup, clsHnd, (unsigned)CHECK_SPILL_ALL, nullptr DEBUGARG("Clone op1 for vector Min"));
GenTree* op2Dup;
op2 = impCloneExpr(op2, &op2Dup, clsHnd, (unsigned)CHECK_SPILL_ALL, nullptr DEBUGARG("Clone op2 for vector Min"));
// op1 = op1 < op2
op1 = gtNewSimdCmpOpNode(GT_LT, type, op1, op2, simdBaseJitType, simdSize, isSimdAsHWIntrinsic);
// result = ConditionalSelect(op1, op1Dup, op2Dup)
return gtNewSimdCndSelNode(type, op1, op1Dup, op2Dup, simdBaseJitType, simdSize, isSimdAsHWIntrinsic);
}
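// gtNewSimdNarrowNode: Creates a node that narrows the elements of op1 and op2 to the
// next smaller element type and concatenates the results into a single vector.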
GenTree* Compiler::gtNewSimdNarrowNode(var_types type,
GenTree* op1,
GenTree* op2,
CorInfoType simdBaseJitType,
unsigned simdSize,
bool isSimdAsHWIntrinsic)
{
assert(IsBaselineSimdIsaSupportedDebugOnly());
assert(varTypeIsSIMD(type));
assert(getSIMDTypeForSize(simdSize) == type);
assert(op1 != nullptr);
assert(op1->TypeIs(type));
assert(op2 != nullptr);
assert(op2->TypeIs(type));
var_types simdBaseType = JitType2PreciseVarType(simdBaseJitType);
assert(varTypeIsArithmetic(simdBaseType) && !varTypeIsLong(simdBaseType));
GenTree* tmp1;
GenTree* tmp2;
#if defined(TARGET_XARCH)
GenTree* tmp3;
GenTree* tmp4;
if (simdSize == 32)
{
assert(compIsaSupportedDebugOnly(InstructionSet_AVX));
switch (simdBaseType)
{
case TYP_BYTE:
case TYP_UBYTE:
{
assert(compIsaSupportedDebugOnly(InstructionSet_AVX2));
// This is the same in principle as the other comments below; however, due to
// code formatting, it is too long to reasonably display here.
CorInfoType opBaseJitType = (simdBaseType == TYP_BYTE) ? CORINFO_TYPE_SHORT : CORINFO_TYPE_USHORT;
CORINFO_CLASS_HANDLE clsHnd = gtGetStructHandleForSIMD(type, opBaseJitType);
tmp1 = gtNewSimdHWIntrinsicNode(type, gtNewIconNode(0x00FF), NI_Vector256_Create, opBaseJitType,
simdSize, isSimdAsHWIntrinsic);
GenTree* tmp1Dup;
tmp1 = impCloneExpr(tmp1, &tmp1Dup, clsHnd, (unsigned)CHECK_SPILL_ALL,
nullptr DEBUGARG("Clone tmp1 for vector narrow"));
tmp2 = gtNewSimdHWIntrinsicNode(type, op1, tmp1, NI_SSE2_And, simdBaseJitType, simdSize,
isSimdAsHWIntrinsic);
tmp3 = gtNewSimdHWIntrinsicNode(type, op2, tmp1Dup, NI_SSE2_And, simdBaseJitType, simdSize,
isSimdAsHWIntrinsic);
tmp4 = gtNewSimdHWIntrinsicNode(type, tmp2, tmp3, NI_SSE2_PackUnsignedSaturate, CORINFO_TYPE_UBYTE,
simdSize, isSimdAsHWIntrinsic);
CorInfoType permuteBaseJitType = (simdBaseType == TYP_BYTE) ? CORINFO_TYPE_LONG : CORINFO_TYPE_ULONG;
return gtNewSimdHWIntrinsicNode(type, tmp4, gtNewIconNode(SHUFFLE_WYZX), NI_AVX2_Permute4x64,
permuteBaseJitType, simdSize, isSimdAsHWIntrinsic);
}
case TYP_SHORT:
case TYP_USHORT:
{
assert(compIsaSupportedDebugOnly(InstructionSet_AVX2));
// op1 = Elements 0L, 0U, 1L, 1U, 2L, 2U, 3L, 3U | 4L, 4U, 5L, 5U, 6L, 6U, 7L, 7U
// op2 = Elements 8L, 8U, 9L, 9U, AL, AU, BL, BU | CL, CU, DL, DU, EL, EU, FL, FU
//
// tmp2 = Elements 0L, --, 1L, --, 2L, --, 3L, -- | 4L, --, 5L, --, 6L, --, 7L, --
// tmp3 = Elements 8L, --, 9L, --, AL, --, BL, -- | CL, --, DL, --, EL, --, FL, --
// tmp4 = Elements 0L, 1L, 2L, 3L, 8L, 9L, AL, BL | 4L, 5L, 6L, 7L, CL, DL, EL, FL
// return Elements 0L, 1L, 2L, 3L, 4L, 5L, 6L, 7L | 8L, 9L, AL, BL, CL, DL, EL, FL
//
// var tmp1 = Vector256.Create(0x0000FFFF).AsInt16();
// var tmp2 = Avx2.And(op1.AsInt16(), tmp1);
// var tmp3 = Avx2.And(op2.AsInt16(), tmp1);
// var tmp4 = Avx2.PackUnsignedSaturate(tmp2, tmp3);
// return Avx2.Permute4x64(tmp4.AsUInt64(), SHUFFLE_WYZX).As<T>();
CorInfoType opBaseJitType = (simdBaseType == TYP_SHORT) ? CORINFO_TYPE_INT : CORINFO_TYPE_UINT;
CORINFO_CLASS_HANDLE clsHnd = gtGetStructHandleForSIMD(type, opBaseJitType);
tmp1 = gtNewSimdHWIntrinsicNode(type, gtNewIconNode(0x0000FFFF), NI_Vector256_Create, opBaseJitType,
simdSize, isSimdAsHWIntrinsic);
GenTree* tmp1Dup;
tmp1 = impCloneExpr(tmp1, &tmp1Dup, clsHnd, (unsigned)CHECK_SPILL_ALL,
nullptr DEBUGARG("Clone tmp1 for vector narrow"));
tmp2 = gtNewSimdHWIntrinsicNode(type, op1, tmp1, NI_SSE2_And, simdBaseJitType, simdSize,
isSimdAsHWIntrinsic);
tmp3 = gtNewSimdHWIntrinsicNode(type, op2, tmp1Dup, NI_SSE2_And, simdBaseJitType, simdSize,
isSimdAsHWIntrinsic);
tmp4 = gtNewSimdHWIntrinsicNode(type, tmp2, tmp3, NI_SSE41_PackUnsignedSaturate, CORINFO_TYPE_USHORT,
simdSize, isSimdAsHWIntrinsic);
CorInfoType permuteBaseJitType = (simdBaseType == TYP_BYTE) ? CORINFO_TYPE_LONG : CORINFO_TYPE_ULONG;
return gtNewSimdHWIntrinsicNode(type, tmp4, gtNewIconNode(SHUFFLE_WYZX), NI_AVX2_Permute4x64,
permuteBaseJitType, simdSize, isSimdAsHWIntrinsic);
}
case TYP_INT:
case TYP_UINT:
{
assert(compIsaSupportedDebugOnly(InstructionSet_AVX2));
// op1 = Elements 0, 1 | 2, 3; 0L, 0U, 1L, 1U | 2L, 2U, 3L, 3U
// op2 = Elements 4, 5 | 6, 7; 4L, 4U, 5L, 5U | 6L, 6U, 7L, 7U
//
// tmp1 = Elements 0L, 4L, 0U, 4U | 2L, 6L, 2U, 6U
// tmp2 = Elements 1L, 5L, 1U, 5U | 3L, 7L, 3U, 7U
// tmp3 = Elements 0L, 1L, 4L, 5L | 2L, 3L, 6L, 7L
// return Elements 0L, 1L, 2L, 3L | 4L, 5L, 6L, 7L
//
// var tmp1 = Avx2.UnpackLow(op1, op2);
// var tmp2 = Avx2.UnpackHigh(op1, op2);
// var tmp3 = Avx2.UnpackLow(tmp1, tmp2);
// return Avx2.Permute4x64(tmp3.AsUInt64(), SHUFFLE_WYZX).AsUInt32();
CorInfoType opBaseJitType = (simdBaseType == TYP_INT) ? CORINFO_TYPE_LONG : CORINFO_TYPE_ULONG;
CORINFO_CLASS_HANDLE clsHnd = gtGetStructHandleForSIMD(type, opBaseJitType);
GenTree* op1Dup;
op1 = impCloneExpr(op1, &op1Dup, clsHnd, (unsigned)CHECK_SPILL_ALL,
nullptr DEBUGARG("Clone op1 for vector narrow"));
GenTree* op2Dup;
op2 = impCloneExpr(op2, &op2Dup, clsHnd, (unsigned)CHECK_SPILL_ALL,
nullptr DEBUGARG("Clone op2 for vector narrow"));
tmp1 = gtNewSimdHWIntrinsicNode(type, op1, op2, NI_AVX2_UnpackLow, simdBaseJitType, simdSize,
isSimdAsHWIntrinsic);
tmp2 = gtNewSimdHWIntrinsicNode(type, op1Dup, op2Dup, NI_AVX2_UnpackHigh, simdBaseJitType, simdSize,
isSimdAsHWIntrinsic);
tmp3 = gtNewSimdHWIntrinsicNode(type, tmp1, tmp2, NI_AVX2_UnpackLow, simdBaseJitType, simdSize,
isSimdAsHWIntrinsic);
return gtNewSimdHWIntrinsicNode(type, tmp3, gtNewIconNode(SHUFFLE_WYZX), NI_AVX2_Permute4x64,
opBaseJitType, simdSize, isSimdAsHWIntrinsic);
}
case TYP_FLOAT:
{
// op1 = Elements 0, 1 | 2, 3
// op2 = Elements 4, 5 | 6, 7
//
// tmp1 = Elements 0, 1, 2, 3 | -, -, -, -
// tmp2 = Elements 4, 5, 6, 7
// return Elements 0, 1, 2, 3 | 4, 5, 6, 7
//
// var tmp1 = Avx.ConvertToVector128Single(op1).ToVector256Unsafe();
// var tmp2 = Avx.ConvertToVector128Single(op2);
// return Avx.InsertVector128(tmp1, tmp2, 1);
tmp1 = gtNewSimdHWIntrinsicNode(TYP_SIMD16, op1, NI_AVX_ConvertToVector128Single, simdBaseJitType,
simdSize, isSimdAsHWIntrinsic);
tmp2 = gtNewSimdHWIntrinsicNode(TYP_SIMD16, op2, NI_AVX_ConvertToVector128Single, simdBaseJitType,
simdSize, isSimdAsHWIntrinsic);
tmp1 = gtNewSimdHWIntrinsicNode(type, tmp1, NI_Vector128_ToVector256Unsafe, simdBaseJitType, 16,
isSimdAsHWIntrinsic);
return gtNewSimdHWIntrinsicNode(type, tmp1, tmp2, gtNewIconNode(1), NI_AVX_InsertVector128,
simdBaseJitType, simdSize, isSimdAsHWIntrinsic);
}
default:
{
unreached();
}
}
}
else
{
switch (simdBaseType)
{
case TYP_BYTE:
case TYP_UBYTE:
{
// op1 = Elements 0, 1, 2, 3, 4, 5, 6, 7; 0L, 0U, 1L, 1U, 2L, 2U, 3L, 3U, 4L, 4U, 5L, 5U, 6L, 6U, 7L, 7U
// op2 = Elements 8, 9, A, B, C, D, E, F; 8L, 8U, 9L, 9U, AL, AU, BL, BU, CL, CU, DL, DU, EL, EU, FL, FU
//
// tmp2 = Elements 0L, --, 1L, --, 2L, --, 3L, --, 4L, --, 5L, --, 6L, --, 7L, --
// tmp3 = Elements 8L, --, 9L, --, AL, --, BL, --, CL, --, DL, --, EL, --, FL, --
// return Elements 0L, 1L, 2L, 3L, 4L, 5L, 6L, 7L, 8L, 9L, AL, BL, CL, DL, EL, FL
//
// var tmp1 = Vector128.Create((ushort)(0x00FF)).AsSByte();
// var tmp2 = Sse2.And(op1.AsSByte(), tmp1);
// var tmp3 = Sse2.And(op2.AsSByte(), tmp1);
// return Sse2.PackUnsignedSaturate(tmp2, tmp3).As<T>();
CorInfoType opBaseJitType = (simdBaseType == TYP_BYTE) ? CORINFO_TYPE_SHORT : CORINFO_TYPE_USHORT;
CORINFO_CLASS_HANDLE clsHnd = gtGetStructHandleForSIMD(type, opBaseJitType);
tmp1 = gtNewSimdHWIntrinsicNode(type, gtNewIconNode(0x00FF), NI_Vector128_Create, opBaseJitType,
simdSize, isSimdAsHWIntrinsic);
GenTree* tmp1Dup;
tmp1 = impCloneExpr(tmp1, &tmp1Dup, clsHnd, (unsigned)CHECK_SPILL_ALL,
nullptr DEBUGARG("Clone tmp1 for vector narrow"));
tmp2 = gtNewSimdHWIntrinsicNode(type, op1, tmp1, NI_SSE2_And, simdBaseJitType, simdSize,
isSimdAsHWIntrinsic);
tmp3 = gtNewSimdHWIntrinsicNode(type, op2, tmp1Dup, NI_SSE2_And, simdBaseJitType, simdSize,
isSimdAsHWIntrinsic);
return gtNewSimdHWIntrinsicNode(type, tmp2, tmp3, NI_SSE2_PackUnsignedSaturate, CORINFO_TYPE_UBYTE,
simdSize, isSimdAsHWIntrinsic);
}
case TYP_SHORT:
case TYP_USHORT:
{
// op1 = Elements 0, 1, 2, 3; 0L, 0U, 1L, 1U, 2L, 2U, 3L, 3U
// op2 = Elements 4, 5, 6, 7; 4L, 4U, 5L, 5U, 6L, 6U, 7L, 7U
//
// ...
CorInfoType opBaseJitType = (simdBaseType == TYP_SHORT) ? CORINFO_TYPE_INT : CORINFO_TYPE_UINT;
CORINFO_CLASS_HANDLE clsHnd = gtGetStructHandleForSIMD(type, opBaseJitType);
if (compOpportunisticallyDependsOn(InstructionSet_SSE41))
{
// ...
//
// tmp2 = Elements 0L, --, 1L, --, 2L, --, 3L, --
// tmp3 = Elements 4L, --, 5L, --, 6L, --, 7L, --
// return Elements 0L, 1L, 2L, 3L, 4L, 5L, 6L, 7L
//
// var tmp1 = Vector128.Create(0x0000FFFF).AsInt16();
// var tmp2 = Sse2.And(op1.AsInt16(), tmp1);
// var tmp3 = Sse2.And(op2.AsInt16(), tmp1);
// return Sse2.PackUnsignedSaturate(tmp2, tmp3).As<T>();
tmp1 = gtNewSimdHWIntrinsicNode(type, gtNewIconNode(0x0000FFFF), NI_Vector128_Create, opBaseJitType,
simdSize, isSimdAsHWIntrinsic);
GenTree* tmp1Dup;
tmp1 = impCloneExpr(tmp1, &tmp1Dup, clsHnd, (unsigned)CHECK_SPILL_ALL,
nullptr DEBUGARG("Clone tmp1 for vector narrow"));
tmp2 = gtNewSimdHWIntrinsicNode(type, op1, tmp1, NI_SSE2_And, simdBaseJitType, simdSize,
isSimdAsHWIntrinsic);
tmp3 = gtNewSimdHWIntrinsicNode(type, op2, tmp1Dup, NI_SSE2_And, simdBaseJitType, simdSize,
isSimdAsHWIntrinsic);
return gtNewSimdHWIntrinsicNode(type, tmp2, tmp3, NI_SSE41_PackUnsignedSaturate,
CORINFO_TYPE_USHORT, simdSize, isSimdAsHWIntrinsic);
}
else
{
// ...
//
// tmp1 = Elements 0L, 4L, 0U, 4U, 1L, 5L, 1U, 5U
// tmp2 = Elements 2L, 6L, 2U, 6U, 3L, 7L, 3U, 7U
// tmp3 = Elements 0L, 2L, 4L, 6L, 0U, 2U, 4U, 6U
// tmp4 = Elements 1L, 3L, 5L, 7L, 1U, 3U, 5U, 7U
// return Elements 0L, 1L, 2L, 3L, 4L, 5L, 6L, 7L
//
// var tmp1 = Sse2.UnpackLow(op1.AsUInt16(), op2.AsUInt16());
// var tmp2 = Sse2.UnpackHigh(op1.AsUInt16(), op2.AsUInt16());
// var tmp3 = Sse2.UnpackLow(tmp1, tmp2);
// var tmp4 = Sse2.UnpackHigh(tmp1, tmp2);
// return Sse2.UnpackLow(tmp3, tmp4).As<T>();
GenTree* op1Dup;
op1 = impCloneExpr(op1, &op1Dup, clsHnd, (unsigned)CHECK_SPILL_ALL,
nullptr DEBUGARG("Clone op1 for vector narrow"));
GenTree* op2Dup;
op2 = impCloneExpr(op2, &op2Dup, clsHnd, (unsigned)CHECK_SPILL_ALL,
nullptr DEBUGARG("Clone op1 for vector narrow"));
tmp1 = gtNewSimdHWIntrinsicNode(type, op1, op2, NI_SSE2_UnpackLow, simdBaseJitType, simdSize,
isSimdAsHWIntrinsic);
tmp2 = gtNewSimdHWIntrinsicNode(type, op1Dup, op2Dup, NI_SSE2_UnpackHigh, simdBaseJitType, simdSize,
isSimdAsHWIntrinsic);
clsHnd = gtGetStructHandleForSIMD(type, simdBaseJitType);
GenTree* tmp1Dup;
tmp1 = impCloneExpr(tmp1, &tmp1Dup, clsHnd, (unsigned)CHECK_SPILL_ALL,
nullptr DEBUGARG("Clone tmp1 for vector narrow"));
GenTree* tmp2Dup;
tmp2 = impCloneExpr(tmp2, &tmp2Dup, clsHnd, (unsigned)CHECK_SPILL_ALL,
nullptr DEBUGARG("Clone tmp2 for vector narrow"));
tmp3 = gtNewSimdHWIntrinsicNode(type, tmp1, tmp2, NI_SSE2_UnpackLow, simdBaseJitType, simdSize,
isSimdAsHWIntrinsic);
tmp4 = gtNewSimdHWIntrinsicNode(type, tmp1Dup, tmp2Dup, NI_SSE2_UnpackHigh, simdBaseJitType,
simdSize, isSimdAsHWIntrinsic);
return gtNewSimdHWIntrinsicNode(type, tmp3, tmp4, NI_SSE2_UnpackLow, simdBaseJitType, simdSize,
isSimdAsHWIntrinsic);
}
}
case TYP_INT:
case TYP_UINT:
{
// op1 = Elements 0, 1; 0L, 0U, 1L, 1U
// op2 = Elements 2, 3; 2L, 2U, 3L, 3U
//
// tmp1 = Elements 0L, 2L, 0U, 2U
// tmp2 = Elements 1L, 3L, 1U, 3U
// return Elements 0L, 1L, 2L, 3L
//
// var tmp1 = Sse2.UnpackLow(op1.AsUInt32(), op2.AsUInt32());
// var tmp2 = Sse2.UnpackHigh(op1.AsUInt32(), op2.AsUInt32());
// return Sse2.UnpackLow(tmp1, tmp2).As<T>();
CorInfoType opBaseJitType = (simdBaseType == TYP_INT) ? CORINFO_TYPE_LONG : CORINFO_TYPE_ULONG;
CORINFO_CLASS_HANDLE clsHnd = gtGetStructHandleForSIMD(type, opBaseJitType);
GenTree* op1Dup;
op1 = impCloneExpr(op1, &op1Dup, clsHnd, (unsigned)CHECK_SPILL_ALL,
nullptr DEBUGARG("Clone op1 for vector narrow"));
GenTree* op2Dup;
op2 = impCloneExpr(op2, &op2Dup, clsHnd, (unsigned)CHECK_SPILL_ALL,
nullptr DEBUGARG("Clone op2 for vector narrow"));
tmp1 = gtNewSimdHWIntrinsicNode(type, op1, op2, NI_SSE2_UnpackLow, simdBaseJitType, simdSize,
isSimdAsHWIntrinsic);
tmp2 = gtNewSimdHWIntrinsicNode(type, op1Dup, op2Dup, NI_SSE2_UnpackHigh, simdBaseJitType, simdSize,
isSimdAsHWIntrinsic);
return gtNewSimdHWIntrinsicNode(type, tmp1, tmp2, NI_SSE2_UnpackLow, simdBaseJitType, simdSize,
isSimdAsHWIntrinsic);
}
case TYP_FLOAT:
{
// op1 = Elements 0, 1
// op2 = Elements 2, 3
//
// tmp1 = Elements 0, 1, -, -
// tmp2 = Elements 2, 3, -, -
// return Elements 0, 1, 2, 3
//
// var tmp1 = Sse2.ConvertToVector128Single(op1);
// var tmp2 = Sse2.ConvertToVector128Single(op2);
// return Sse.MoveLowToHigh(tmp1, tmp2);
CorInfoType opBaseJitType = CORINFO_TYPE_DOUBLE;
tmp1 = gtNewSimdHWIntrinsicNode(type, op1, NI_SSE2_ConvertToVector128Single, opBaseJitType, simdSize,
isSimdAsHWIntrinsic);
tmp2 = gtNewSimdHWIntrinsicNode(type, op2, NI_SSE2_ConvertToVector128Single, opBaseJitType, simdSize,
isSimdAsHWIntrinsic);
return gtNewSimdHWIntrinsicNode(type, tmp1, tmp2, NI_SSE_MoveLowToHigh, simdBaseJitType, simdSize,
isSimdAsHWIntrinsic);
}
default:
{
unreached();
}
}
}
#elif defined(TARGET_ARM64)
if (simdSize == 16)
{
if (varTypeIsFloating(simdBaseType))
{
// var tmp1 = AdvSimd.Arm64.ConvertToSingleLower(op1);
// return AdvSimd.Arm64.ConvertToSingleUpper(tmp1, op2);
tmp1 = gtNewSimdHWIntrinsicNode(TYP_SIMD8, op1, NI_AdvSimd_Arm64_ConvertToSingleLower, simdBaseJitType, 8,
isSimdAsHWIntrinsic);
return gtNewSimdHWIntrinsicNode(type, tmp1, op2, NI_AdvSimd_Arm64_ConvertToSingleUpper, simdBaseJitType,
simdSize, isSimdAsHWIntrinsic);
}
else
{
// var tmp1 = AdvSimd.ExtractNarrowingLower(op1);
// return AdvSimd.ExtractNarrowingUpper(tmp1, op2);
tmp1 = gtNewSimdHWIntrinsicNode(TYP_SIMD8, op1, NI_AdvSimd_ExtractNarrowingLower, simdBaseJitType, 8,
isSimdAsHWIntrinsic);
return gtNewSimdHWIntrinsicNode(type, tmp1, op2, NI_AdvSimd_ExtractNarrowingUpper, simdBaseJitType,
simdSize, isSimdAsHWIntrinsic);
}
}
else if (varTypeIsFloating(simdBaseType))
{
// var tmp1 = op1.ToVector128Unsafe();
// var tmp2 = AdvSimd.InsertScalar(tmp1.AsDouble(), 1, op2.AsDouble());
// return AdvSimd.Arm64.ConvertToSingleLower(tmp2);
CorInfoType tmp2BaseJitType = CORINFO_TYPE_DOUBLE;
tmp1 = gtNewSimdHWIntrinsicNode(TYP_SIMD16, op1, NI_Vector64_ToVector128Unsafe, simdBaseJitType, simdSize,
isSimdAsHWIntrinsic);
tmp2 = gtNewSimdHWIntrinsicNode(TYP_SIMD16, tmp1, gtNewIconNode(1), op2, NI_AdvSimd_InsertScalar,
tmp2BaseJitType, 16, isSimdAsHWIntrinsic);
return gtNewSimdHWIntrinsicNode(type, tmp2, NI_AdvSimd_Arm64_ConvertToSingleLower, simdBaseJitType, simdSize,
isSimdAsHWIntrinsic);
}
else
{
// var tmp1 = op1.ToVector128Unsafe();
// var tmp2 = AdvSimd.InsertScalar(tmp1.AsUInt64(), 1, op2.AsUInt64());
// return AdvSimd.ExtractNarrowingLower(tmp2).As<T>();
CorInfoType tmp2BaseJitType = varTypeIsSigned(simdBaseType) ? CORINFO_TYPE_LONG : CORINFO_TYPE_ULONG;
tmp1 = gtNewSimdHWIntrinsicNode(TYP_SIMD16, op1, NI_Vector64_ToVector128Unsafe, simdBaseJitType, simdSize,
isSimdAsHWIntrinsic);
tmp2 = gtNewSimdHWIntrinsicNode(TYP_SIMD16, tmp1, gtNewIconNode(1), op2, NI_AdvSimd_InsertScalar,
tmp2BaseJitType, 16, isSimdAsHWIntrinsic);
return gtNewSimdHWIntrinsicNode(type, tmp2, NI_AdvSimd_ExtractNarrowingLower, simdBaseJitType, simdSize,
isSimdAsHWIntrinsic);
}
#else
#error Unsupported platform
#endif // !TARGET_XARCH && !TARGET_ARM64
}
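// gtNewSimdSqrtNode: Creates a node that computes the per-element square root of a
// floating-point vector.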
GenTree* Compiler::gtNewSimdSqrtNode(
var_types type, GenTree* op1, CorInfoType simdBaseJitType, unsigned simdSize, bool isSimdAsHWIntrinsic)
{
assert(IsBaselineSimdIsaSupportedDebugOnly());
assert(varTypeIsSIMD(type));
assert(getSIMDTypeForSize(simdSize) == type);
assert(op1 != nullptr);
assert(op1->TypeIs(type));
var_types simdBaseType = JitType2PreciseVarType(simdBaseJitType);
assert(varTypeIsFloating(simdBaseType));
NamedIntrinsic intrinsic = NI_Illegal;
#if defined(TARGET_XARCH)
if (simdSize == 32)
{
assert(compIsaSupportedDebugOnly(InstructionSet_AVX));
intrinsic = NI_AVX_Sqrt;
}
else if (simdBaseType == TYP_FLOAT)
{
intrinsic = NI_SSE_Sqrt;
}
else
{
intrinsic = NI_SSE2_Sqrt;
}
#elif defined(TARGET_ARM64)
if ((simdSize == 8) && (simdBaseType == TYP_DOUBLE))
{
intrinsic = NI_AdvSimd_SqrtScalar;
}
else
{
intrinsic = NI_AdvSimd_Arm64_Sqrt;
}
#else
#error Unsupported platform
#endif // !TARGET_XARCH && !TARGET_ARM64
assert(intrinsic != NI_Illegal);
return gtNewSimdHWIntrinsicNode(type, op1, intrinsic, simdBaseJitType, simdSize, isSimdAsHWIntrinsic);
}
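// gtNewSimdSumNode: Creates a node that sums all elements of a vector, producing a scalar
// of the base element type.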
GenTree* Compiler::gtNewSimdSumNode(
var_types type, GenTree* op1, CorInfoType simdBaseJitType, unsigned simdSize, bool isSimdAsHWIntrinsic)
{
assert(IsBaselineSimdIsaSupportedDebugOnly());
var_types simdType = getSIMDTypeForSize(simdSize);
assert(varTypeIsSIMD(simdType));
assert(op1 != nullptr);
assert(op1->TypeIs(simdType));
var_types simdBaseType = JitType2PreciseVarType(simdBaseJitType);
assert(varTypeIsArithmetic(simdBaseType));
NamedIntrinsic intrinsic = NI_Illegal;
GenTree* tmp = nullptr;
CORINFO_CLASS_HANDLE clsHnd = gtGetStructHandleForSIMD(simdType, simdBaseJitType);
#if defined(TARGET_XARCH)
assert(!varTypeIsByte(simdBaseType) && !varTypeIsLong(simdBaseType));
// HorizontalAdd combines pairs so we need log2(vectorLength) passes to sum all elements together.
unsigned vectorLength = getSIMDVectorLength(simdSize, simdBaseType);
int haddCount = genLog2(vectorLength);
if (simdSize == 32)
{
// Minus 1 because for the last pass we split the vector to low / high and add them together.
haddCount -= 1;
if (varTypeIsFloating(simdBaseType))
{
assert(compIsaSupportedDebugOnly(InstructionSet_AVX));
intrinsic = NI_AVX_HorizontalAdd;
}
else
{
assert(compIsaSupportedDebugOnly(InstructionSet_AVX2));
intrinsic = NI_AVX2_HorizontalAdd;
}
}
else if (varTypeIsFloating(simdBaseType))
{
assert(compIsaSupportedDebugOnly(InstructionSet_SSE3));
intrinsic = NI_SSE3_HorizontalAdd;
}
else
{
assert(compIsaSupportedDebugOnly(InstructionSet_SSSE3));
intrinsic = NI_SSSE3_HorizontalAdd;
}
for (int i = 0; i < haddCount; i++)
{
op1 = impCloneExpr(op1, &tmp, clsHnd, (unsigned)CHECK_SPILL_ALL, nullptr DEBUGARG("Clone op1 for vector sum"));
op1 = gtNewSimdHWIntrinsicNode(simdType, op1, tmp, intrinsic, simdBaseJitType, simdSize, isSimdAsHWIntrinsic);
}
if (simdSize == 32)
{
intrinsic = (simdBaseType == TYP_FLOAT) ? NI_SSE_Add : NI_SSE2_Add;
op1 = impCloneExpr(op1, &tmp, clsHnd, (unsigned)CHECK_SPILL_ALL, nullptr DEBUGARG("Clone op1 for vector sum"));
op1 = gtNewSimdHWIntrinsicNode(TYP_SIMD16, op1, gtNewIconNode(0x01, TYP_INT), NI_AVX_ExtractVector128,
simdBaseJitType, simdSize, isSimdAsHWIntrinsic);
tmp = gtNewSimdHWIntrinsicNode(simdType, tmp, NI_Vector256_GetLower, simdBaseJitType, simdSize,
isSimdAsHWIntrinsic);
op1 = gtNewSimdHWIntrinsicNode(TYP_SIMD16, op1, tmp, intrinsic, simdBaseJitType, 16, isSimdAsHWIntrinsic);
}
return gtNewSimdHWIntrinsicNode(type, op1, NI_Vector128_ToScalar, simdBaseJitType, simdSize, isSimdAsHWIntrinsic);
#elif defined(TARGET_ARM64)
switch (simdBaseType)
{
case TYP_BYTE:
case TYP_UBYTE:
case TYP_SHORT:
case TYP_USHORT:
{
tmp = gtNewSimdHWIntrinsicNode(simdType, op1, NI_AdvSimd_Arm64_AddAcross, simdBaseJitType, simdSize,
isSimdAsHWIntrinsic);
return gtNewSimdHWIntrinsicNode(type, tmp, NI_Vector64_ToScalar, simdBaseJitType, 8, isSimdAsHWIntrinsic);
}
case TYP_INT:
case TYP_UINT:
{
if (simdSize == 8)
{
op1 = impCloneExpr(op1, &tmp, clsHnd, (unsigned)CHECK_SPILL_ALL,
nullptr DEBUGARG("Clone op1 for vector sum"));
tmp = gtNewSimdHWIntrinsicNode(simdType, op1, tmp, NI_AdvSimd_AddPairwise, simdBaseJitType, simdSize,
isSimdAsHWIntrinsic);
}
else
{
tmp = gtNewSimdHWIntrinsicNode(TYP_SIMD16, op1, NI_AdvSimd_Arm64_AddAcross, simdBaseJitType, 16,
isSimdAsHWIntrinsic);
}
return gtNewSimdHWIntrinsicNode(type, tmp, NI_Vector64_ToScalar, simdBaseJitType, 8, isSimdAsHWIntrinsic);
}
case TYP_FLOAT:
{
if (simdSize == 8)
{
op1 = gtNewSimdHWIntrinsicNode(TYP_SIMD8, op1, NI_AdvSimd_Arm64_AddPairwiseScalar, simdBaseJitType,
simdSize, isSimdAsHWIntrinsic);
}
else
{
unsigned vectorLength = getSIMDVectorLength(simdSize, simdBaseType);
int haddCount = genLog2(vectorLength);
for (int i = 0; i < haddCount; i++)
{
op1 = impCloneExpr(op1, &tmp, clsHnd, (unsigned)CHECK_SPILL_ALL,
nullptr DEBUGARG("Clone op1 for vector sum"));
op1 = gtNewSimdHWIntrinsicNode(simdType, op1, tmp, NI_AdvSimd_Arm64_AddPairwise, simdBaseJitType,
simdSize, isSimdAsHWIntrinsic);
}
}
return gtNewSimdHWIntrinsicNode(type, op1, NI_Vector128_ToScalar, simdBaseJitType, simdSize,
isSimdAsHWIntrinsic);
}
case TYP_DOUBLE:
case TYP_LONG:
case TYP_ULONG:
{
if (simdSize == 16)
{
op1 = gtNewSimdHWIntrinsicNode(TYP_SIMD8, op1, NI_AdvSimd_Arm64_AddPairwiseScalar, simdBaseJitType,
simdSize, isSimdAsHWIntrinsic);
}
return gtNewSimdHWIntrinsicNode(type, op1, NI_Vector64_ToScalar, simdBaseJitType, 8, isSimdAsHWIntrinsic);
}
default:
{
unreached();
}
}
#else
#error Unsupported platform
#endif // !TARGET_XARCH && !TARGET_ARM64
}
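//---------------------------------------------------------------------------------------
// gtNewSimdUnOpNode:
// Creates a SIMD unary operation node. Only GT_NEG and GT_NOT are supported; they are
// expanded to (Zero - op1), (op1 ^ AllBitsSet), or a direct Negate/Not intrinsic,
// depending on the target and base type.
//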
GenTree* Compiler::gtNewSimdUnOpNode(genTreeOps op,
var_types type,
GenTree* op1,
CorInfoType simdBaseJitType,
unsigned simdSize,
bool isSimdAsHWIntrinsic)
{
assert(IsBaselineSimdIsaSupportedDebugOnly());
assert(varTypeIsSIMD(type));
assert(getSIMDTypeForSize(simdSize) == type);
assert(op1 != nullptr);
assert(op1->TypeIs(type));
var_types simdBaseType = JitType2PreciseVarType(simdBaseJitType);
assert(varTypeIsArithmetic(simdBaseType));
NamedIntrinsic intrinsic = NI_Illegal;
GenTree* op2 = nullptr;
switch (op)
{
#if defined(TARGET_XARCH)
case GT_NEG:
{
if (simdSize == 32)
{
assert(compIsaSupportedDebugOnly(InstructionSet_AVX));
assert(varTypeIsFloating(simdBaseType) || compIsaSupportedDebugOnly(InstructionSet_AVX2));
}
op2 = gtNewSimdZeroNode(type, simdBaseJitType, simdSize, isSimdAsHWIntrinsic);
// Zero - op1
return gtNewSimdBinOpNode(GT_SUB, type, op2, op1, simdBaseJitType, simdSize, isSimdAsHWIntrinsic);
}
case GT_NOT:
{
assert((simdSize != 32) || compIsaSupportedDebugOnly(InstructionSet_AVX));
intrinsic = (simdSize == 32) ? NI_Vector256_get_AllBitsSet : NI_Vector128_get_AllBitsSet;
op2 = gtNewSimdHWIntrinsicNode(type, intrinsic, simdBaseJitType, simdSize, isSimdAsHWIntrinsic);
// op1 ^ AllBitsSet
return gtNewSimdBinOpNode(GT_XOR, type, op1, op2, simdBaseJitType, simdSize, isSimdAsHWIntrinsic);
}
#elif defined(TARGET_ARM64)
case GT_NEG:
{
if (varTypeIsSigned(simdBaseType))
{
if (simdBaseType == TYP_LONG)
{
intrinsic = (simdSize == 8) ? NI_AdvSimd_Arm64_NegateScalar : NI_AdvSimd_Arm64_Negate;
}
else if (simdBaseType == TYP_DOUBLE)
{
intrinsic = (simdSize == 8) ? NI_AdvSimd_NegateScalar : NI_AdvSimd_Arm64_Negate;
}
else
{
intrinsic = NI_AdvSimd_Negate;
}
return gtNewSimdHWIntrinsicNode(type, op1, intrinsic, simdBaseJitType, simdSize, isSimdAsHWIntrinsic);
}
else
{
// Zero - op1
op2 = gtNewSimdZeroNode(type, simdBaseJitType, simdSize, isSimdAsHWIntrinsic);
return gtNewSimdBinOpNode(GT_SUB, type, op2, op1, simdBaseJitType, simdSize, isSimdAsHWIntrinsic);
}
}
case GT_NOT:
{
return gtNewSimdHWIntrinsicNode(type, op1, NI_AdvSimd_Not, simdBaseJitType, simdSize, isSimdAsHWIntrinsic);
}
#else
#error Unsupported platform
#endif // !TARGET_XARCH && !TARGET_ARM64
default:
{
unreached();
}
}
}
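//---------------------------------------------------------------------------------------
// gtNewSimdWidenLowerNode:
// Creates a node that widens the lower half of the elements of op1 to the next larger
// element type (e.g. int -> long, float -> double), producing a vector of the same
// overall size.
//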
GenTree* Compiler::gtNewSimdWidenLowerNode(
var_types type, GenTree* op1, CorInfoType simdBaseJitType, unsigned simdSize, bool isSimdAsHWIntrinsic)
{
assert(IsBaselineSimdIsaSupportedDebugOnly());
assert(varTypeIsSIMD(type));
assert(getSIMDTypeForSize(simdSize) == type);
assert(op1 != nullptr);
assert(op1->TypeIs(type));
var_types simdBaseType = JitType2PreciseVarType(simdBaseJitType);
assert(varTypeIsArithmetic(simdBaseType) && !varTypeIsLong(simdBaseType));
NamedIntrinsic intrinsic = NI_Illegal;
GenTree* tmp1;
#if defined(TARGET_XARCH)
if (simdSize == 32)
{
assert(compIsaSupportedDebugOnly(InstructionSet_AVX));
assert(!varTypeIsIntegral(simdBaseType) || compIsaSupportedDebugOnly(InstructionSet_AVX2));
tmp1 =
gtNewSimdHWIntrinsicNode(type, op1, NI_Vector256_GetLower, simdBaseJitType, simdSize, isSimdAsHWIntrinsic);
switch (simdBaseType)
{
case TYP_BYTE:
case TYP_UBYTE:
{
intrinsic = NI_AVX2_ConvertToVector256Int16;
break;
}
case TYP_SHORT:
case TYP_USHORT:
{
intrinsic = NI_AVX2_ConvertToVector256Int32;
break;
}
case TYP_INT:
case TYP_UINT:
{
intrinsic = NI_AVX2_ConvertToVector256Int64;
break;
}
case TYP_FLOAT:
{
intrinsic = NI_AVX_ConvertToVector256Double;
break;
}
default:
{
unreached();
}
}
assert(intrinsic != NI_Illegal);
return gtNewSimdHWIntrinsicNode(type, tmp1, intrinsic, simdBaseJitType, simdSize, isSimdAsHWIntrinsic);
}
else if ((simdBaseType == TYP_FLOAT) || compOpportunisticallyDependsOn(InstructionSet_SSE41))
{
switch (simdBaseType)
{
case TYP_BYTE:
case TYP_UBYTE:
{
intrinsic = NI_SSE41_ConvertToVector128Int16;
break;
}
case TYP_SHORT:
case TYP_USHORT:
{
intrinsic = NI_SSE41_ConvertToVector128Int32;
break;
}
case TYP_INT:
case TYP_UINT:
{
intrinsic = NI_SSE41_ConvertToVector128Int64;
break;
}
case TYP_FLOAT:
{
intrinsic = NI_SSE2_ConvertToVector128Double;
break;
}
default:
{
unreached();
}
}
assert(intrinsic != NI_Illegal);
return gtNewSimdHWIntrinsicNode(type, op1, intrinsic, simdBaseJitType, simdSize, isSimdAsHWIntrinsic);
}
else
{
tmp1 = gtNewSimdZeroNode(type, simdBaseJitType, simdSize, isSimdAsHWIntrinsic);
if (varTypeIsSigned(simdBaseType))
{
CORINFO_CLASS_HANDLE clsHnd = gtGetStructHandleForSIMD(type, simdBaseJitType);
GenTree* op1Dup;
op1 = impCloneExpr(op1, &op1Dup, clsHnd, (unsigned)CHECK_SPILL_ALL,
nullptr DEBUGARG("Clone op1 for vector widen lower"));
tmp1 = gtNewSimdHWIntrinsicNode(type, op1, tmp1, NI_SSE2_CompareLessThan, simdBaseJitType, simdSize,
isSimdAsHWIntrinsic);
op1 = op1Dup;
}
return gtNewSimdHWIntrinsicNode(type, op1, tmp1, NI_SSE2_UnpackLow, simdBaseJitType, simdSize,
isSimdAsHWIntrinsic);
}
#elif defined(TARGET_ARM64)
if (simdSize == 16)
{
tmp1 = gtNewSimdHWIntrinsicNode(TYP_SIMD8, op1, NI_Vector128_GetLower, simdBaseJitType, simdSize,
isSimdAsHWIntrinsic);
}
else
{
assert(simdSize == 8);
tmp1 = op1;
}
if (varTypeIsFloating(simdBaseType))
{
assert(simdBaseType == TYP_FLOAT);
intrinsic = NI_AdvSimd_Arm64_ConvertToDouble;
}
else if (varTypeIsSigned(simdBaseType))
{
intrinsic = NI_AdvSimd_SignExtendWideningLower;
}
else
{
intrinsic = NI_AdvSimd_ZeroExtendWideningLower;
}
assert(intrinsic != NI_Illegal);
tmp1 = gtNewSimdHWIntrinsicNode(type, tmp1, intrinsic, simdBaseJitType, 8, isSimdAsHWIntrinsic);
if (simdSize == 8)
{
tmp1 = gtNewSimdHWIntrinsicNode(type, tmp1, NI_Vector128_GetLower, simdBaseJitType, 16, isSimdAsHWIntrinsic);
}
return tmp1;
#else
#error Unsupported platform
#endif // !TARGET_XARCH && !TARGET_ARM64
}
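//---------------------------------------------------------------------------------------
// gtNewSimdWidenUpperNode:
// Creates a node that widens the upper half of the elements of op1 to the next larger
// element type, producing a vector of the same overall size.
//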
GenTree* Compiler::gtNewSimdWidenUpperNode(
var_types type, GenTree* op1, CorInfoType simdBaseJitType, unsigned simdSize, bool isSimdAsHWIntrinsic)
{
assert(IsBaselineSimdIsaSupportedDebugOnly());
assert(varTypeIsSIMD(type));
assert(getSIMDTypeForSize(simdSize) == type);
assert(op1 != nullptr);
assert(op1->TypeIs(type));
var_types simdBaseType = JitType2PreciseVarType(simdBaseJitType);
assert(varTypeIsArithmetic(simdBaseType) && !varTypeIsLong(simdBaseType));
NamedIntrinsic intrinsic = NI_Illegal;
GenTree* tmp1;
#if defined(TARGET_XARCH)
if (simdSize == 32)
{
assert(compIsaSupportedDebugOnly(InstructionSet_AVX));
assert(!varTypeIsIntegral(simdBaseType) || compIsaSupportedDebugOnly(InstructionSet_AVX2));
tmp1 = gtNewSimdHWIntrinsicNode(type, op1, gtNewIconNode(1), NI_AVX_ExtractVector128, simdBaseJitType, simdSize,
isSimdAsHWIntrinsic);
switch (simdBaseType)
{
case TYP_BYTE:
case TYP_UBYTE:
{
intrinsic = NI_AVX2_ConvertToVector256Int16;
break;
}
case TYP_SHORT:
case TYP_USHORT:
{
intrinsic = NI_AVX2_ConvertToVector256Int32;
break;
}
case TYP_INT:
case TYP_UINT:
{
intrinsic = NI_AVX2_ConvertToVector256Int64;
break;
}
case TYP_FLOAT:
{
intrinsic = NI_AVX_ConvertToVector256Double;
break;
}
default:
{
unreached();
}
}
assert(intrinsic != NI_Illegal);
return gtNewSimdHWIntrinsicNode(type, tmp1, intrinsic, simdBaseJitType, simdSize, isSimdAsHWIntrinsic);
}
else if (varTypeIsFloating(simdBaseType))
{
assert(simdBaseType == TYP_FLOAT);
CORINFO_CLASS_HANDLE clsHnd = gtGetStructHandleForSIMD(type, simdBaseJitType);
GenTree* op1Dup;
op1 = impCloneExpr(op1, &op1Dup, clsHnd, (unsigned)CHECK_SPILL_ALL,
nullptr DEBUGARG("Clone op1 for vector widen upper"));
tmp1 = gtNewSimdHWIntrinsicNode(type, op1, op1Dup, NI_SSE_MoveHighToLow, simdBaseJitType, simdSize,
isSimdAsHWIntrinsic);
return gtNewSimdHWIntrinsicNode(type, tmp1, NI_SSE2_ConvertToVector128Double, simdBaseJitType, simdSize,
isSimdAsHWIntrinsic);
}
else if (compOpportunisticallyDependsOn(InstructionSet_SSE41))
{
tmp1 = gtNewSimdHWIntrinsicNode(type, op1, gtNewIconNode(8), NI_SSE2_ShiftRightLogical128BitLane,
simdBaseJitType, simdSize, isSimdAsHWIntrinsic);
switch (simdBaseType)
{
case TYP_BYTE:
case TYP_UBYTE:
{
intrinsic = NI_SSE41_ConvertToVector128Int16;
break;
}
case TYP_SHORT:
case TYP_USHORT:
{
intrinsic = NI_SSE41_ConvertToVector128Int32;
break;
}
case TYP_INT:
case TYP_UINT:
{
intrinsic = NI_SSE41_ConvertToVector128Int64;
break;
}
default:
{
unreached();
}
}
assert(intrinsic != NI_Illegal);
return gtNewSimdHWIntrinsicNode(type, tmp1, intrinsic, simdBaseJitType, simdSize, isSimdAsHWIntrinsic);
}
else
{
tmp1 = gtNewSimdZeroNode(type, simdBaseJitType, simdSize, isSimdAsHWIntrinsic);
if (varTypeIsSigned(simdBaseType))
{
CORINFO_CLASS_HANDLE clsHnd = gtGetStructHandleForSIMD(type, simdBaseJitType);
GenTree* op1Dup;
op1 = impCloneExpr(op1, &op1Dup, clsHnd, (unsigned)CHECK_SPILL_ALL,
nullptr DEBUGARG("Clone op1 for vector widen upper"));
tmp1 = gtNewSimdHWIntrinsicNode(type, op1, tmp1, NI_SSE2_CompareLessThan, simdBaseJitType, simdSize,
isSimdAsHWIntrinsic);
op1 = op1Dup;
}
return gtNewSimdHWIntrinsicNode(type, op1, tmp1, NI_SSE2_UnpackHigh, simdBaseJitType, simdSize,
isSimdAsHWIntrinsic);
}
#elif defined(TARGET_ARM64)
GenTree* zero;
if (simdSize == 16)
{
if (varTypeIsFloating(simdBaseType))
{
assert(simdBaseType == TYP_FLOAT);
intrinsic = NI_AdvSimd_Arm64_ConvertToDoubleUpper;
}
else if (varTypeIsSigned(simdBaseType))
{
intrinsic = NI_AdvSimd_SignExtendWideningUpper;
}
else
{
intrinsic = NI_AdvSimd_ZeroExtendWideningUpper;
}
assert(intrinsic != NI_Illegal);
return gtNewSimdHWIntrinsicNode(type, op1, intrinsic, simdBaseJitType, simdSize, isSimdAsHWIntrinsic);
}
else
{
assert(simdSize == 8);
ssize_t index = 8 / genTypeSize(simdBaseType);
if (varTypeIsFloating(simdBaseType))
{
assert(simdBaseType == TYP_FLOAT);
intrinsic = NI_AdvSimd_Arm64_ConvertToDouble;
}
else if (varTypeIsSigned(simdBaseType))
{
intrinsic = NI_AdvSimd_SignExtendWideningLower;
}
else
{
intrinsic = NI_AdvSimd_ZeroExtendWideningLower;
}
assert(intrinsic != NI_Illegal);
tmp1 = gtNewSimdHWIntrinsicNode(TYP_SIMD16, op1, intrinsic, simdBaseJitType, simdSize, isSimdAsHWIntrinsic);
zero = gtNewSimdZeroNode(TYP_SIMD16, simdBaseJitType, 16, isSimdAsHWIntrinsic);
tmp1 = gtNewSimdHWIntrinsicNode(TYP_SIMD16, tmp1, zero, gtNewIconNode(index), NI_AdvSimd_ExtractVector128,
simdBaseJitType, 16, isSimdAsHWIntrinsic);
return gtNewSimdHWIntrinsicNode(type, tmp1, NI_Vector128_GetLower, simdBaseJitType, simdSize,
isSimdAsHWIntrinsic);
}
#else
#error Unsupported platform
#endif // !TARGET_XARCH && !TARGET_ARM64
}
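//---------------------------------------------------------------------------------------
// gtNewSimdWithElementNode:
// Creates a node that returns op1 with the element at the constant index op2 replaced
// by the value op3. The index must be a constant within range for the element count.
//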
GenTree* Compiler::gtNewSimdWithElementNode(var_types type,
GenTree* op1,
GenTree* op2,
GenTree* op3,
CorInfoType simdBaseJitType,
unsigned simdSize,
bool isSimdAsHWIntrinsic)
{
NamedIntrinsic hwIntrinsicID = NI_Vector128_WithElement;
var_types simdBaseType = JitType2PreciseVarType(simdBaseJitType);
assert(varTypeIsArithmetic(simdBaseType));
assert(op2->IsCnsIntOrI());
ssize_t imm8 = op2->AsIntCon()->IconValue();
ssize_t count = simdSize / genTypeSize(simdBaseType);
assert((0 <= imm8) && (imm8 < count));
#if defined(TARGET_XARCH)
switch (simdBaseType)
{
// Using software fallback if simdBaseType is not supported by hardware
case TYP_BYTE:
case TYP_UBYTE:
case TYP_INT:
case TYP_UINT:
assert(compIsaSupportedDebugOnly(InstructionSet_SSE41));
break;
case TYP_LONG:
case TYP_ULONG:
assert(compIsaSupportedDebugOnly(InstructionSet_SSE41_X64));
break;
case TYP_DOUBLE:
case TYP_FLOAT:
case TYP_SHORT:
case TYP_USHORT:
assert(compIsaSupportedDebugOnly(InstructionSet_SSE2));
break;
default:
unreached();
}
if (simdSize == 32)
{
hwIntrinsicID = NI_Vector256_WithElement;
}
#elif defined(TARGET_ARM64)
switch (simdBaseType)
{
case TYP_LONG:
case TYP_ULONG:
case TYP_DOUBLE:
if (simdSize == 8)
{
return gtNewSimdHWIntrinsicNode(type, op3, NI_Vector64_Create, simdBaseJitType, simdSize,
isSimdAsHWIntrinsic);
}
break;
case TYP_FLOAT:
case TYP_BYTE:
case TYP_UBYTE:
case TYP_SHORT:
case TYP_USHORT:
case TYP_INT:
case TYP_UINT:
break;
default:
unreached();
}
hwIntrinsicID = NI_AdvSimd_Insert;
#else
#error Unsupported platform
#endif // !TARGET_XARCH && !TARGET_ARM64
return gtNewSimdHWIntrinsicNode(type, op1, op2, op3, hwIntrinsicID, simdBaseJitType, simdSize, isSimdAsHWIntrinsic);
}
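//---------------------------------------------------------------------------------------
// gtNewSimdZeroNode:
// Creates a node representing a SIMD vector with all elements set to zero.
//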
GenTree* Compiler::gtNewSimdZeroNode(var_types type,
CorInfoType simdBaseJitType,
unsigned simdSize,
bool isSimdAsHWIntrinsic)
{
assert(IsBaselineSimdIsaSupportedDebugOnly());
assert(varTypeIsSIMD(type));
assert(getSIMDTypeForSize(simdSize) == type);
var_types simdBaseType = JitType2PreciseVarType(simdBaseJitType);
assert(varTypeIsArithmetic(simdBaseType));
NamedIntrinsic intrinsic = NI_Illegal;
#if defined(TARGET_XARCH)
intrinsic = (simdSize == 32) ? NI_Vector256_get_Zero : NI_Vector128_get_Zero;
#elif defined(TARGET_ARM64)
intrinsic = (simdSize > 8) ? NI_Vector128_get_Zero : NI_Vector64_get_Zero;
#else
#error Unsupported platform
#endif // !TARGET_XARCH && !TARGET_ARM64
return gtNewSimdHWIntrinsicNode(type, intrinsic, simdBaseJitType, simdSize, isSimdAsHWIntrinsic);
}
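// The gtNewScalarHWIntrinsicNode overloads below create HW intrinsic nodes that have no
// associated SIMD base type (CORINFO_TYPE_UNDEF) and no SIMD size, for zero to three operands.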
GenTreeHWIntrinsic* Compiler::gtNewScalarHWIntrinsicNode(var_types type, NamedIntrinsic hwIntrinsicID)
{
return new (this, GT_HWINTRINSIC) GenTreeHWIntrinsic(type, getAllocator(CMK_ASTNode), hwIntrinsicID,
CORINFO_TYPE_UNDEF, 0, /* isSimdAsHWIntrinsic */ false);
}
GenTreeHWIntrinsic* Compiler::gtNewScalarHWIntrinsicNode(var_types type, GenTree* op1, NamedIntrinsic hwIntrinsicID)
{
SetOpLclRelatedToSIMDIntrinsic(op1);
return new (this, GT_HWINTRINSIC) GenTreeHWIntrinsic(type, getAllocator(CMK_ASTNode), hwIntrinsicID,
CORINFO_TYPE_UNDEF, 0, /* isSimdAsHWIntrinsic */ false, op1);
}
GenTreeHWIntrinsic* Compiler::gtNewScalarHWIntrinsicNode(var_types type,
GenTree* op1,
GenTree* op2,
NamedIntrinsic hwIntrinsicID)
{
SetOpLclRelatedToSIMDIntrinsic(op1);
SetOpLclRelatedToSIMDIntrinsic(op2);
return new (this, GT_HWINTRINSIC)
GenTreeHWIntrinsic(type, getAllocator(CMK_ASTNode), hwIntrinsicID, CORINFO_TYPE_UNDEF, 0,
/* isSimdAsHWIntrinsic */ false, op1, op2);
}
GenTreeHWIntrinsic* Compiler::gtNewScalarHWIntrinsicNode(
var_types type, GenTree* op1, GenTree* op2, GenTree* op3, NamedIntrinsic hwIntrinsicID)
{
SetOpLclRelatedToSIMDIntrinsic(op1);
SetOpLclRelatedToSIMDIntrinsic(op2);
SetOpLclRelatedToSIMDIntrinsic(op3);
return new (this, GT_HWINTRINSIC)
GenTreeHWIntrinsic(type, getAllocator(CMK_ASTNode), hwIntrinsicID, CORINFO_TYPE_UNDEF, 0,
/* isSimdAsHWIntrinsic */ false, op1, op2, op3);
}
// Returns true for the HW Intrinsic instructions that have MemoryLoad semantics, false otherwise
bool GenTreeHWIntrinsic::OperIsMemoryLoad() const
{
#if defined(TARGET_XARCH) || defined(TARGET_ARM64)
NamedIntrinsic intrinsicId = GetHWIntrinsicId();
HWIntrinsicCategory category = HWIntrinsicInfo::lookupCategory(intrinsicId);
if (category == HW_Category_MemoryLoad)
{
return true;
}
#ifdef TARGET_XARCH
else if (HWIntrinsicInfo::MaybeMemoryLoad(GetHWIntrinsicId()))
{
// Some intrinsics (without HW_Category_MemoryLoad) also have MemoryLoad semantics
// This is generally because they have both vector and pointer overloads, e.g.,
// * Vector128<byte> BroadcastScalarToVector128(Vector128<byte> value)
// * Vector128<byte> BroadcastScalarToVector128(byte* source)
// So, we need to check whether the argument's type is a memory reference or Vector128
if ((category == HW_Category_SimpleSIMD) || (category == HW_Category_SIMDScalar))
{
assert(GetOperandCount() == 1);
switch (intrinsicId)
{
case NI_SSE41_ConvertToVector128Int16:
case NI_SSE41_ConvertToVector128Int32:
case NI_SSE41_ConvertToVector128Int64:
case NI_AVX2_BroadcastScalarToVector128:
case NI_AVX2_BroadcastScalarToVector256:
case NI_AVX2_ConvertToVector256Int16:
case NI_AVX2_ConvertToVector256Int32:
case NI_AVX2_ConvertToVector256Int64:
{
CorInfoType auxiliaryType = GetAuxiliaryJitType();
if (auxiliaryType == CORINFO_TYPE_PTR)
{
return true;
}
assert(auxiliaryType == CORINFO_TYPE_UNDEF);
return false;
}
default:
{
unreached();
}
}
}
else if (category == HW_Category_IMM)
{
// Do we have less than 3 operands?
if (GetOperandCount() < 3)
{
return false;
}
else if (HWIntrinsicInfo::isAVX2GatherIntrinsic(GetHWIntrinsicId()))
{
return true;
}
}
}
#endif // TARGET_XARCH
#endif // TARGET_XARCH || TARGET_ARM64
return false;
}
// Returns true for the HW Intrinsic instructions that have MemoryStore semantics, false otherwise
bool GenTreeHWIntrinsic::OperIsMemoryStore() const
{
#if defined(TARGET_XARCH) || defined(TARGET_ARM64)
HWIntrinsicCategory category = HWIntrinsicInfo::lookupCategory(GetHWIntrinsicId());
if (category == HW_Category_MemoryStore)
{
return true;
}
#ifdef TARGET_XARCH
else if (HWIntrinsicInfo::MaybeMemoryStore(GetHWIntrinsicId()) &&
(category == HW_Category_IMM || category == HW_Category_Scalar))
{
// Some intrinsics (without HW_Category_MemoryStore) also have MemoryStore semantics
// Bmi2/Bmi2.X64.MultiplyNoFlags may return the lower half result by an out argument
// unsafe ulong MultiplyNoFlags(ulong left, ulong right, ulong* low)
//
// So, the 3-argument form is MemoryStore
if (GetOperandCount() == 3)
{
switch (GetHWIntrinsicId())
{
case NI_BMI2_MultiplyNoFlags:
case NI_BMI2_X64_MultiplyNoFlags:
return true;
default:
return false;
}
}
}
#endif // TARGET_XARCH
#endif // TARGET_XARCH || TARGET_ARM64
return false;
}
// Returns true for the HW Intrinsic instructions that have MemoryLoad or MemoryStore semantics, false otherwise
bool GenTreeHWIntrinsic::OperIsMemoryLoadOrStore() const
{
#if defined(TARGET_XARCH) || defined(TARGET_ARM64)
return OperIsMemoryLoad() || OperIsMemoryStore();
#else
return false;
#endif
}
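// GetHWIntrinsicId: returns the intrinsic id, asserting that the node's operand count
// matches the argument count expected for that intrinsic (when the latter is known).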
NamedIntrinsic GenTreeHWIntrinsic::GetHWIntrinsicId() const
{
NamedIntrinsic id = gtHWIntrinsicId;
int numArgs = HWIntrinsicInfo::lookupNumArgs(id);
bool numArgsUnknown = numArgs < 0;
assert((static_cast<size_t>(numArgs) == GetOperandCount()) || numArgsUnknown);
return id;
}
void GenTreeHWIntrinsic::SetHWIntrinsicId(NamedIntrinsic intrinsicId)
{
#ifdef DEBUG
size_t oldOperandCount = GetOperandCount();
int newOperandCount = HWIntrinsicInfo::lookupNumArgs(intrinsicId);
bool newCountUnknown = newOperandCount < 0;
// We'll choose to trust the programmer here.
assert((oldOperandCount == static_cast<size_t>(newOperandCount)) || newCountUnknown);
#endif // DEBUG
gtHWIntrinsicId = intrinsicId;
}
// TODO-Review: why are layouts not compared here?
/* static */ bool GenTreeHWIntrinsic::Equals(GenTreeHWIntrinsic* op1, GenTreeHWIntrinsic* op2)
{
return (op1->TypeGet() == op2->TypeGet()) && (op1->GetHWIntrinsicId() == op2->GetHWIntrinsicId()) &&
(op1->GetSimdBaseType() == op2->GetSimdBaseType()) && (op1->GetSimdSize() == op2->GetSimdSize()) &&
(op1->GetAuxiliaryType() == op2->GetAuxiliaryType()) && (op1->GetOtherReg() == op2->GetOtherReg()) &&
OperandsAreEqual(op1, op2);
}
#endif // FEATURE_HW_INTRINSICS
//---------------------------------------------------------------------------------------
// gtNewMustThrowException:
// create a throw node (calling into JIT helper) that must be thrown.
// The result would be a comma node: COMMA(jithelperthrow(void), x) where x's type should be specified.
//
// Arguments
// helper - JIT helper ID
// type - return type of the node
//
// Return Value
// pointer to the throw node
//
GenTree* Compiler::gtNewMustThrowException(unsigned helper, var_types type, CORINFO_CLASS_HANDLE clsHnd)
{
GenTreeCall* node = gtNewHelperCallNode(helper, TYP_VOID);
node->gtCallMoreFlags |= GTF_CALL_M_DOES_NOT_RETURN;
if (type != TYP_VOID)
{
unsigned dummyTemp = lvaGrabTemp(true DEBUGARG("dummy temp of must-throw exception"));
if (type == TYP_STRUCT)
{
lvaSetStruct(dummyTemp, clsHnd, false);
type = lvaTable[dummyTemp].lvType; // struct type is normalized
}
else
{
lvaTable[dummyTemp].lvType = type;
}
GenTree* dummyNode = gtNewLclvNode(dummyTemp, type);
return gtNewOperNode(GT_COMMA, type, node, dummyNode);
}
return node;
}
//---------------------------------------------------------------------------------------
// InitializeStructReturnType:
// Initialize the Return Type Descriptor for a method that returns a struct type
//
// Arguments
// comp - Compiler Instance
// retClsHnd - VM handle to the struct type returned by the method
//
// Return Value
// None
//
void ReturnTypeDesc::InitializeStructReturnType(Compiler* comp,
CORINFO_CLASS_HANDLE retClsHnd,
CorInfoCallConvExtension callConv)
{
assert(!m_inited);
#if FEATURE_MULTIREG_RET
assert(retClsHnd != NO_CLASS_HANDLE);
unsigned structSize = comp->info.compCompHnd->getClassSize(retClsHnd);
Compiler::structPassingKind howToReturnStruct;
var_types returnType = comp->getReturnTypeForStruct(retClsHnd, callConv, &howToReturnStruct, structSize);
switch (howToReturnStruct)
{
case Compiler::SPK_EnclosingType:
m_isEnclosingType = true;
FALLTHROUGH;
case Compiler::SPK_PrimitiveType:
{
assert(returnType != TYP_UNKNOWN);
assert(returnType != TYP_STRUCT);
m_regType[0] = returnType;
break;
}
case Compiler::SPK_ByValueAsHfa:
{
assert(varTypeIsStruct(returnType));
var_types hfaType = comp->GetHfaType(retClsHnd);
// We should have an hfa struct type
assert(varTypeIsValidHfaType(hfaType));
// Note that the retail build issues a warning about a potential division by zero without this Max function
unsigned elemSize = Max((unsigned)1, EA_SIZE_IN_BYTES(emitActualTypeSize(hfaType)));
// The size of this struct should be evenly divisible by elemSize
assert((structSize % elemSize) == 0);
unsigned hfaCount = (structSize / elemSize);
for (unsigned i = 0; i < hfaCount; ++i)
{
m_regType[i] = hfaType;
}
if (comp->compFloatingPointUsed == false)
{
comp->compFloatingPointUsed = true;
}
break;
}
case Compiler::SPK_ByValue:
{
assert(varTypeIsStruct(returnType));
#ifdef UNIX_AMD64_ABI
SYSTEMV_AMD64_CORINFO_STRUCT_REG_PASSING_DESCRIPTOR structDesc;
comp->eeGetSystemVAmd64PassStructInRegisterDescriptor(retClsHnd, &structDesc);
assert(structDesc.passedInRegisters);
for (int i = 0; i < structDesc.eightByteCount; i++)
{
assert(i < MAX_RET_REG_COUNT);
m_regType[i] = comp->GetEightByteType(structDesc, i);
}
#elif defined(TARGET_ARM64)
// a non-HFA struct returned using two registers
//
assert((structSize > TARGET_POINTER_SIZE) && (structSize <= (2 * TARGET_POINTER_SIZE)));
BYTE gcPtrs[2] = {TYPE_GC_NONE, TYPE_GC_NONE};
comp->info.compCompHnd->getClassGClayout(retClsHnd, &gcPtrs[0]);
for (unsigned i = 0; i < 2; ++i)
{
m_regType[i] = comp->getJitGCType(gcPtrs[i]);
}
#elif defined(TARGET_X86)
// an 8-byte struct returned using two registers
assert(structSize == 8);
BYTE gcPtrs[2] = {TYPE_GC_NONE, TYPE_GC_NONE};
comp->info.compCompHnd->getClassGClayout(retClsHnd, &gcPtrs[0]);
for (unsigned i = 0; i < 2; ++i)
{
m_regType[i] = comp->getJitGCType(gcPtrs[i]);
}
#else // TARGET_XXX
// This target needs support here!
//
NYI("Unsupported TARGET returning a TYP_STRUCT in InitializeStructReturnType");
#endif // UNIX_AMD64_ABI
break; // for case SPK_ByValue
}
case Compiler::SPK_ByReference:
// We are returning using the return buffer argument
// There are no return registers
break;
default:
unreached(); // By the contract of getReturnTypeForStruct we should never get here.
} // end of switch (howToReturnStruct)
#endif // FEATURE_MULTIREG_RET
#ifdef DEBUG
m_inited = true;
#endif
}
//---------------------------------------------------------------------------------------
// InitializeLongReturnType:
// Initialize the Return Type Descriptor for a method that returns a TYP_LONG
//
void ReturnTypeDesc::InitializeLongReturnType()
{
assert(!m_inited);
#if defined(TARGET_X86) || defined(TARGET_ARM)
// Sets up a ReturnTypeDesc for returning a long using two registers
//
assert(MAX_RET_REG_COUNT >= 2);
m_regType[0] = TYP_INT;
m_regType[1] = TYP_INT;
#else // not (TARGET_X86 or TARGET_ARM)
m_regType[0] = TYP_LONG;
#endif // TARGET_X86 or TARGET_ARM
#ifdef DEBUG
m_inited = true;
#endif
}
//-------------------------------------------------------------------
// GetABIReturnReg: Return i'th return register as per target ABI
//
// Arguments:
// idx - Index of the return register.
// The first return register has an index of 0 and so on.
//
// Return Value:
// Returns i'th return register as per target ABI.
//
// Notes:
// x86 and ARM return long in multiple registers.
// ARM and ARM64 return HFA struct in multiple registers.
//
regNumber ReturnTypeDesc::GetABIReturnReg(unsigned idx) const
{
unsigned count = GetReturnRegCount();
assert(idx < count);
regNumber resultReg = REG_NA;
#ifdef UNIX_AMD64_ABI
var_types regType0 = GetReturnRegType(0);
if (idx == 0)
{
if (varTypeIsIntegralOrI(regType0))
{
resultReg = REG_INTRET;
}
else
{
noway_assert(varTypeUsesFloatReg(regType0));
resultReg = REG_FLOATRET;
}
}
else if (idx == 1)
{
var_types regType1 = GetReturnRegType(1);
if (varTypeIsIntegralOrI(regType1))
{
if (varTypeIsIntegralOrI(regType0))
{
resultReg = REG_INTRET_1;
}
else
{
resultReg = REG_INTRET;
}
}
else
{
noway_assert(varTypeUsesFloatReg(regType1));
if (varTypeUsesFloatReg(regType0))
{
resultReg = REG_FLOATRET_1;
}
else
{
resultReg = REG_FLOATRET;
}
}
}
#elif defined(TARGET_X86)
if (idx == 0)
{
resultReg = REG_LNGRET_LO;
}
else if (idx == 1)
{
resultReg = REG_LNGRET_HI;
}
#elif defined(TARGET_ARM)
var_types regType = GetReturnRegType(idx);
if (varTypeIsIntegralOrI(regType))
{
// Ints are returned in one return register.
// Longs are returned in two return registers.
if (idx == 0)
{
resultReg = REG_LNGRET_LO;
}
else if (idx == 1)
{
resultReg = REG_LNGRET_HI;
}
}
else
{
// Floats are returned in one return register (f0).
// Doubles are returned in one return register (d0).
// Structs are returned in four registers with HFAs.
assert(idx < MAX_RET_REG_COUNT); // Up to 4 return registers for HFA's
if (regType == TYP_DOUBLE)
{
resultReg = (regNumber)((unsigned)(REG_FLOATRET) + idx * 2); // d0, d1, d2 or d3
}
else
{
resultReg = (regNumber)((unsigned)(REG_FLOATRET) + idx); // f0, f1, f2 or f3
}
}
#elif defined(TARGET_ARM64)
var_types regType = GetReturnRegType(idx);
if (varTypeIsIntegralOrI(regType))
{
noway_assert(idx < 2); // Up to 2 return registers for 16-byte structs
resultReg = (idx == 0) ? REG_INTRET : REG_INTRET_1; // X0 or X1
}
else
{
noway_assert(idx < 4); // Up to 4 return registers for HFA's
resultReg = (regNumber)((unsigned)(REG_FLOATRET) + idx); // V0, V1, V2 or V3
}
#endif // TARGET_XXX
assert(resultReg != REG_NA);
return resultReg;
}
//--------------------------------------------------------------------------------
// GetABIReturnRegs: get the mask of return registers as per target arch ABI.
//
// Arguments:
// None
//
// Return Value:
// reg mask of return registers in which the return type is returned.
//
// Note:
// This routine can be used when the caller is not particular about the order
// of return registers and wants to know the set of return registers.
//
// static
regMaskTP ReturnTypeDesc::GetABIReturnRegs() const
{
regMaskTP resultMask = RBM_NONE;
unsigned count = GetReturnRegCount();
for (unsigned i = 0; i < count; ++i)
{
resultMask |= genRegMask(GetABIReturnReg(i));
}
return resultMask;
}
//------------------------------------------------------------------------
// The following functions manage the gtRsvdRegs set of temporary registers
// created by LSRA during code generation.
//------------------------------------------------------------------------
// AvailableTempRegCount: return the number of available temporary registers in the (optional) given set
// (typically, RBM_ALLINT or RBM_ALLFLOAT).
//
// Arguments:
// mask - (optional) Check for available temporary registers only in this set.
//
// Return Value:
// Count of available temporary registers in given set.
//
unsigned GenTree::AvailableTempRegCount(regMaskTP mask /* = (regMaskTP)-1 */) const
{
return genCountBits(gtRsvdRegs & mask);
}
//------------------------------------------------------------------------
// GetSingleTempReg: There is expected to be exactly one available temporary register
// in the given mask in the gtRsvdRegs set. Get that register. No future calls to get
// a temporary register are expected. Removes the register from the set, but only in
// DEBUG to avoid doing unnecessary work in non-DEBUG builds.
//
// Arguments:
// mask - (optional) Get an available temporary register only in this set.
//
// Return Value:
// Available temporary register in given mask.
//
regNumber GenTree::GetSingleTempReg(regMaskTP mask /* = (regMaskTP)-1 */)
{
regMaskTP availableSet = gtRsvdRegs & mask;
assert(genCountBits(availableSet) == 1);
regNumber tempReg = genRegNumFromMask(availableSet);
INDEBUG(gtRsvdRegs &= ~availableSet;) // Remove the register from the set, so it can't be used again.
return tempReg;
}
//------------------------------------------------------------------------
// ExtractTempReg: Find the lowest number temporary register from the gtRsvdRegs set
// that is also in the optional given mask (typically, RBM_ALLINT or RBM_ALLFLOAT),
// and return it. Remove this register from the temporary register set, so it won't
// be returned again.
//
// Arguments:
// mask - (optional) Extract an available temporary register only in this set.
//
// Return Value:
// Available temporary register in given mask.
//
regNumber GenTree::ExtractTempReg(regMaskTP mask /* = (regMaskTP)-1 */)
{
regMaskTP availableSet = gtRsvdRegs & mask;
assert(genCountBits(availableSet) >= 1);
regMaskTP tempRegMask = genFindLowestBit(availableSet);
gtRsvdRegs &= ~tempRegMask;
return genRegNumFromMask(tempRegMask);
}
//------------------------------------------------------------------------
// GetLclOffs: if `this` is a field or a field address, it returns the offset
// of the field inside the struct; otherwise it returns 0.
//
// Return Value:
// The offset value.
//
uint16_t GenTreeLclVarCommon::GetLclOffs() const
{
if (OperIsLocalField())
{
return AsLclFld()->GetLclOffs();
}
else
{
return 0;
}
}
#if defined(TARGET_XARCH) && defined(FEATURE_HW_INTRINSICS)
//------------------------------------------------------------------------
// GetResultOpNumForFMA: check if the result is written into one of the operands.
// If none of the operands is overwritten, check if any of them is a last use.
//
// Return Value:
// The operand number overwritten or lastUse. 0 is the default value, where the result is written into
// a destination that is not one of the source operands and there is no last use op.
//
unsigned GenTreeHWIntrinsic::GetResultOpNumForFMA(GenTree* use, GenTree* op1, GenTree* op2, GenTree* op3)
{
// only FMA intrinsic node should call into this function
assert(HWIntrinsicInfo::lookupIsa(gtHWIntrinsicId) == InstructionSet_FMA);
if (use != nullptr && use->OperIs(GT_STORE_LCL_VAR))
{
// For store_lcl_var, check if any op is overwritten
GenTreeLclVarCommon* overwritten = use->AsLclVarCommon();
unsigned overwrittenLclNum = overwritten->GetLclNum();
if (op1->IsLocal() && op1->AsLclVarCommon()->GetLclNum() == overwrittenLclNum)
{
return 1;
}
else if (op2->IsLocal() && op2->AsLclVarCommon()->GetLclNum() == overwrittenLclNum)
{
return 2;
}
else if (op3->IsLocal() && op3->AsLclVarCommon()->GetLclNum() == overwrittenLclNum)
{
return 3;
}
}
// If no overwritten op, check if there is any last use op
// https://github.com/dotnet/runtime/issues/62215
if (op1->OperIs(GT_LCL_VAR) && op1->IsLastUse(0))
return 1;
else if (op2->OperIs(GT_LCL_VAR) && op2->IsLastUse(0))
return 2;
else if (op3->OperIs(GT_LCL_VAR) && op3->IsLastUse(0))
return 3;
return 0;
}
#endif // TARGET_XARCH && FEATURE_HW_INTRINSICS
#ifdef TARGET_ARM
//------------------------------------------------------------------------
// IsOffsetMisaligned: check if the field needs special handling on arm.
//
// Return Value:
// true if it is a float field with a misaligned offset, false otherwise.
//
bool GenTreeLclFld::IsOffsetMisaligned() const
{
if (varTypeIsFloating(gtType))
{
return ((m_lclOffs % emitTypeSize(TYP_FLOAT)) != 0);
}
return false;
}
#endif // TARGET_ARM
bool GenTree::IsInvariant() const
{
return OperIsConst() || Compiler::impIsAddressInLocal(this);
}
//------------------------------------------------------------------------
// IsNeverNegative: returns true if the given tree is known to be never
// negative, i.e. the upper bit will always be zero.
// Only valid for integral types.
//
// Arguments:
// comp - Compiler object, needed for IntegralRange::ForNode
//
// Return Value:
// true if the given tree is known to be never negative
//
bool GenTree::IsNeverNegative(Compiler* comp) const
{
assert(varTypeIsIntegral(this));
if (IsIntegralConst())
{
return AsIntConCommon()->IntegralValue() >= 0;
}
// TODO-Casts: extend IntegralRange to handle constants
return IntegralRange::ForNode((GenTree*)this, comp).IsPositive();
}
| 1 |
dotnet/runtime | 66,245 | JIT: Optimize movzx after setcc | Clear target reg via `xor reg, reg` instead of relying on zero-extend after SETCC which is heavier and slower
```csharp
bool Test(int x) => x == 42;
```
```diff
; Method Test(int):bool:this
G_M55728_IG01: ;; offset=0000H
G_M55728_IG02: ;; offset=0000H
+ 33C0 xor eax, eax
83FA2A cmp edx, 42
0F94C0 sete al
- 0FB6C0 movzx rax, al
G_M55728_IG03: ;; offset=0009H
C3 ret
-; Total bytes of code: 10
+; Total bytes of code: 9
```
Nice diffs:
```
benchmarks.run.Linux.x64.checked.mch:
Total bytes of delta: -5230 (-0.03 % of base)
coreclr_tests.pmi.Linux.x64.checked.mch:
Total bytes of delta: -210860 (-0.16 % of base)
libraries.crossgen2.Linux.x64.checked.mch:
Total bytes of delta: -4420 (-0.06 % of base)
libraries.pmi.Linux.x64.checked.mch:
Total bytes of delta: -25312 (-0.05 % of base)
libraries_tests.pmi.Linux.x64.checked.mch:
Total bytes of delta: -49314 (-0.04 % of base)
``` | EgorBo | 2022-03-05T17:18:25Z | 2022-03-07T23:22:14Z | 440dfe4a7beecd7755767aa247f47af00b119383 | 5635905f134a3329a15112bd4975acef3f661eb2 | JIT: Optimize movzx after setcc. Clear target reg via `xor reg, reg` instead of relying on zero-extend after SETCC which is heavier and slower
```csharp
bool Test(int x) => x == 42;
```
```diff
; Method Test(int):bool:this
G_M55728_IG01: ;; offset=0000H
G_M55728_IG02: ;; offset=0000H
+ 33C0 xor eax, eax
83FA2A cmp edx, 42
0F94C0 sete al
- 0FB6C0 movzx rax, al
G_M55728_IG03: ;; offset=0009H
C3 ret
-; Total bytes of code: 10
+; Total bytes of code: 9
```
Nice diffs:
```
benchmarks.run.Linux.x64.checked.mch:
Total bytes of delta: -5230 (-0.03 % of base)
coreclr_tests.pmi.Linux.x64.checked.mch:
Total bytes of delta: -210860 (-0.16 % of base)
libraries.crossgen2.Linux.x64.checked.mch:
Total bytes of delta: -4420 (-0.06 % of base)
libraries.pmi.Linux.x64.checked.mch:
Total bytes of delta: -25312 (-0.05 % of base)
libraries_tests.pmi.Linux.x64.checked.mch:
Total bytes of delta: -49314 (-0.04 % of base)
``` | ./src/coreclr/jit/gentree.h | // Licensed to the .NET Foundation under one or more agreements.
// The .NET Foundation licenses this file to you under the MIT license.
/*XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XX XX
XX GenTree XX
XX XX
XX This is the node in the semantic tree graph. It represents the operation XX
XX corresponding to the node, and other information during code-gen. XX
XX XX
XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
*/
/*****************************************************************************/
#ifndef _GENTREE_H_
#define _GENTREE_H_
/*****************************************************************************/
#include "vartype.h" // For "var_types"
#include "target.h" // For "regNumber"
#include "ssaconfig.h" // For "SsaConfig::RESERVED_SSA_NUM"
#include "valuenumtype.h"
#include "jitstd.h"
#include "jithashtable.h"
#include "simd.h"
#include "namedintrinsiclist.h"
#include "layout.h"
#include "debuginfo.h"
// Debugging GenTree is much easier if we add a magic virtual function to make the debugger able to figure out what type
// it's got. This is enabled by default in DEBUG. To enable it in RET builds (temporarily!), you need to change the
// build to define DEBUGGABLE_GENTREE=1, as well as pass /OPT:NOICF to the linker (or else all the vtables get merged,
// making the debugging value supplied by them useless).
#ifndef DEBUGGABLE_GENTREE
#ifdef DEBUG
#define DEBUGGABLE_GENTREE 1
#else // !DEBUG
#define DEBUGGABLE_GENTREE 0
#endif // !DEBUG
#endif // !DEBUGGABLE_GENTREE
// The SpecialCodeKind enum is used to indicate the type of special (unique)
// target block that will be targeted by an instruction.
// These are used by:
// GenTreeBoundsChk nodes (SCK_RNGCHK_FAIL, SCK_ARG_EXCPN, SCK_ARG_RNG_EXCPN)
// - these nodes have a field (gtThrowKind) to indicate which kind
// GenTreeOps nodes, for which codegen will generate the branch
// - it will use the appropriate kind based on the opcode, though it's not
// clear why SCK_OVERFLOW == SCK_ARITH_EXCPN
//
enum SpecialCodeKind
{
SCK_NONE,
SCK_RNGCHK_FAIL, // target when range check fails
SCK_DIV_BY_ZERO, // target for divide by zero (Not used on X86/X64)
SCK_ARITH_EXCPN, // target on arithmetic exception
SCK_OVERFLOW = SCK_ARITH_EXCPN, // target on overflow
SCK_ARG_EXCPN, // target on ArgumentException (currently used only for SIMD intrinsics)
SCK_ARG_RNG_EXCPN, // target on ArgumentOutOfRangeException (currently used only for SIMD intrinsics)
SCK_COUNT
};
/*****************************************************************************/
enum genTreeOps : BYTE
{
#define GTNODE(en, st, cm, ok) GT_##en,
#include "gtlist.h"
GT_COUNT,
#ifdef TARGET_64BIT
// GT_CNS_NATIVELONG is the gtOper symbol for GT_CNS_LNG or GT_CNS_INT, depending on the target.
// For the 64-bit targets we will only use GT_CNS_INT as it used to represent all the possible sizes
GT_CNS_NATIVELONG = GT_CNS_INT,
#else
// For the 32-bit targets we use a GT_CNS_LNG to hold a 64-bit integer constant and GT_CNS_INT for all others.
// In the future when we retarget the JIT for x86 we should consider eliminating GT_CNS_LNG
GT_CNS_NATIVELONG = GT_CNS_LNG,
#endif
};
// The following enum defines a set of bit flags that can be used
// to classify expression tree nodes.
//
enum GenTreeOperKind
{
GTK_SPECIAL = 0x00, // special operator
GTK_LEAF = 0x01, // leaf operator
GTK_UNOP = 0x02, // unary operator
GTK_BINOP = 0x04, // binary operator
GTK_KINDMASK = (GTK_SPECIAL | GTK_LEAF | GTK_UNOP | GTK_BINOP), // operator kind mask
GTK_SMPOP = (GTK_UNOP | GTK_BINOP),
GTK_COMMUTE = 0x08, // commutative operator
GTK_EXOP = 0x10, // Indicates an oper for a node type that extends GenTreeOp (or GenTreeUnOp)
// by adding non-node fields to the unary or binary operator.
GTK_NOVALUE = 0x20, // node does not produce a value
GTK_MASK = 0xFF
};
// The following enum defines a set of bit flags that describe opers for the purposes
// of DEBUG-only checks. This is separate from the above "GenTreeOperKind"s to avoid
// making the table for those larger in Release builds. However, it resides in the same
// "namespace" and so all values here must be distinct from those in "GenTreeOperKind".
//
enum GenTreeDebugOperKind
{
DBK_FIRST_FLAG = GTK_MASK + 1,
DBK_NOTHIR = DBK_FIRST_FLAG, // This oper is not supported in HIR (before rationalization).
DBK_NOTLIR = DBK_FIRST_FLAG << 1, // This oper is not supported in LIR (after rationalization).
DBK_NOCONTAIN = DBK_FIRST_FLAG << 2, // This oper produces a value, but may not be contained.
DBK_MASK = ~GTK_MASK
};
/*****************************************************************************/
enum gtCallTypes : BYTE
{
CT_USER_FUNC, // User function
CT_HELPER, // Jit-helper
CT_INDIRECT, // Indirect call
CT_COUNT // fake entry (must be last)
};
#ifdef DEBUG
/*****************************************************************************
*
* TargetHandleTypes are used to determine the type of handle present inside GenTreeIntCon node.
* The values are such that they don't overlap with helper's or user function's handle.
*/
enum TargetHandleType : BYTE
{
THT_Unknown = 2,
THT_GSCookieCheck = 4,
THT_SetGSCookie = 6,
THT_IntializeArrayIntrinsics = 8
};
#endif
/*****************************************************************************/
struct BasicBlock;
enum BasicBlockFlags : unsigned __int64;
struct InlineCandidateInfo;
struct GuardedDevirtualizationCandidateInfo;
struct ClassProfileCandidateInfo;
struct LateDevirtualizationInfo;
typedef unsigned short AssertionIndex;
static const AssertionIndex NO_ASSERTION_INDEX = 0;
//------------------------------------------------------------------------
// GetAssertionIndex: return 1-based AssertionIndex from 0-based int index.
//
// Arguments:
// index - 0-based index
// Return Value:
// 1-based AssertionIndex.
inline AssertionIndex GetAssertionIndex(unsigned index)
{
return (AssertionIndex)(index + 1);
}
class AssertionInfo
{
// true if the assertion holds on the bbNext edge instead of the bbJumpDest edge (for GT_JTRUE nodes)
unsigned short m_isNextEdgeAssertion : 1;
// 1-based index of the assertion
unsigned short m_assertionIndex : 15;
AssertionInfo(bool isNextEdgeAssertion, AssertionIndex assertionIndex)
: m_isNextEdgeAssertion(isNextEdgeAssertion), m_assertionIndex(assertionIndex)
{
assert(m_assertionIndex == assertionIndex);
}
public:
AssertionInfo() : AssertionInfo(false, 0)
{
}
AssertionInfo(AssertionIndex assertionIndex) : AssertionInfo(false, assertionIndex)
{
}
static AssertionInfo ForNextEdge(AssertionIndex assertionIndex)
{
// Ignore the edge information if there's no assertion
bool isNextEdge = (assertionIndex != NO_ASSERTION_INDEX);
return AssertionInfo(isNextEdge, assertionIndex);
}
void Clear()
{
m_isNextEdgeAssertion = 0;
m_assertionIndex = NO_ASSERTION_INDEX;
}
bool HasAssertion() const
{
return m_assertionIndex != NO_ASSERTION_INDEX;
}
AssertionIndex GetAssertionIndex() const
{
return m_assertionIndex;
}
bool IsNextEdgeAssertion() const
{
return m_isNextEdgeAssertion;
}
};
/*****************************************************************************/
// GT_FIELD nodes will be lowered into more "code-gen-able" representations, like
// GT_IND's of addresses, or GT_LCL_FLD nodes. We'd like to preserve the more abstract
// information, and will therefore annotate such lowered nodes with FieldSeq's. A FieldSeq
// represents a (possibly) empty sequence of fields. The fields are in the order
// in which they are dereferenced. The first field may be an object field or a struct field;
// all subsequent fields must be struct fields.
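// For example, an access such as "o.a.b.c" (where "a" is an object field and "b" and "c" are
// struct fields) is represented by the field sequence [a, b, c], in dereference order.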
struct FieldSeqNode
{
CORINFO_FIELD_HANDLE m_fieldHnd;
FieldSeqNode* m_next;
FieldSeqNode(CORINFO_FIELD_HANDLE fieldHnd, FieldSeqNode* next) : m_fieldHnd(fieldHnd), m_next(next)
{
}
// returns true when this is the pseudo #FirstElem field sequence
bool IsFirstElemFieldSeq();
// returns true when this is the pseudo #ConstantIndex field sequence
bool IsConstantIndexFieldSeq();
// returns true when this is the pseudo #FirstElem field sequence or the pseudo #ConstantIndex field sequence
bool IsPseudoField() const;
CORINFO_FIELD_HANDLE GetFieldHandle() const
{
assert(!IsPseudoField() && (m_fieldHnd != nullptr));
return m_fieldHnd;
}
FieldSeqNode* GetTail()
{
FieldSeqNode* tail = this;
while (tail->m_next != nullptr)
{
tail = tail->m_next;
}
return tail;
}
// Make sure this provides methods that allow it to be used as a KeyFuncs type in SimplerHash.
static int GetHashCode(FieldSeqNode fsn)
{
return static_cast<int>(reinterpret_cast<intptr_t>(fsn.m_fieldHnd)) ^
static_cast<int>(reinterpret_cast<intptr_t>(fsn.m_next));
}
static bool Equals(const FieldSeqNode& fsn1, const FieldSeqNode& fsn2)
{
return fsn1.m_fieldHnd == fsn2.m_fieldHnd && fsn1.m_next == fsn2.m_next;
}
};
// This class canonicalizes field sequences.
class FieldSeqStore
{
typedef JitHashTable<FieldSeqNode, /*KeyFuncs*/ FieldSeqNode, FieldSeqNode*> FieldSeqNodeCanonMap;
CompAllocator m_alloc;
FieldSeqNodeCanonMap* m_canonMap;
static FieldSeqNode s_notAField; // No value, just exists to provide an address.
// Dummy variables to provide the addresses for the "pseudo field handle" statics below.
static int FirstElemPseudoFieldStruct;
static int ConstantIndexPseudoFieldStruct;
public:
FieldSeqStore(CompAllocator alloc);
// Returns the (canonical in the store) singleton field sequence for the given handle.
FieldSeqNode* CreateSingleton(CORINFO_FIELD_HANDLE fieldHnd);
// This is a special distinguished FieldSeqNode indicating that a constant does *not*
// represent a valid field sequence. This is "infectious", in the sense that appending it
// (on either side) to any field sequence yields the "NotAField()" sequence.
static FieldSeqNode* NotAField()
{
return &s_notAField;
}
// Returns the (canonical in the store) field sequence representing the concatenation of
// the sequences represented by "a" and "b". Assumes that "a" and "b" are canonical; that is,
// they are the results of CreateSingleton, NotAField, or Append calls. If either of the arguments
// are the "NotAField" value, so is the result.
FieldSeqNode* Append(FieldSeqNode* a, FieldSeqNode* b);
// We have a few "pseudo" field handles:
// This treats the constant offset of the first element of something as if it were a field.
// Works for method table offsets of boxed structs, or first elem offset of arrays/strings.
static CORINFO_FIELD_HANDLE FirstElemPseudoField;
// If there is a constant index, we make a pseudo field to correspond to the constant added to
// offset of the indexed field. This keeps the field sequence structure "normalized", especially in the
// case where the element type is a struct, so we might add a further struct field offset.
static CORINFO_FIELD_HANDLE ConstantIndexPseudoField;
static bool IsPseudoField(CORINFO_FIELD_HANDLE hnd)
{
return hnd == FirstElemPseudoField || hnd == ConstantIndexPseudoField;
}
};
class GenTreeUseEdgeIterator;
class GenTreeOperandIterator;
struct Statement;
/*****************************************************************************/
// Forward declarations of the subtypes
#define GTSTRUCT_0(fn, en) struct GenTree##fn;
#define GTSTRUCT_1(fn, en) struct GenTree##fn;
#define GTSTRUCT_2(fn, en, en2) struct GenTree##fn;
#define GTSTRUCT_3(fn, en, en2, en3) struct GenTree##fn;
#define GTSTRUCT_4(fn, en, en2, en3, en4) struct GenTree##fn;
#define GTSTRUCT_N(fn, ...) struct GenTree##fn;
#define GTSTRUCT_2_SPECIAL(fn, en, en2) GTSTRUCT_2(fn, en, en2)
#define GTSTRUCT_3_SPECIAL(fn, en, en2, en3) GTSTRUCT_3(fn, en, en2, en3)
#include "gtstructs.h"
/*****************************************************************************/
// Don't format the GenTreeFlags declaration
// clang-format off
//------------------------------------------------------------------------
// GenTreeFlags: a bitmask of flags for GenTree stored in gtFlags
//
enum GenTreeFlags : unsigned int
{
GTF_EMPTY = 0,
//---------------------------------------------------------------------
// The first set of flags can be used with a large set of nodes, and
// thus they must all have distinct values. That is, one can test any
// expression node for one of these flags.
//---------------------------------------------------------------------
GTF_ASG = 0x00000001, // sub-expression contains an assignment
GTF_CALL = 0x00000002, // sub-expression contains a func. call
GTF_EXCEPT = 0x00000004, // sub-expression might throw an exception
GTF_GLOB_REF = 0x00000008, // sub-expression uses global variable(s)
GTF_ORDER_SIDEEFF = 0x00000010, // sub-expression has a re-ordering side effect
// If you set these flags, make sure that code:gtExtractSideEffList knows how to find the tree,
// otherwise the C# (run csc /o-) code:
// var v = side_eff_operation
// with no use of `v` will drop your tree on the floor.
GTF_PERSISTENT_SIDE_EFFECTS = GTF_ASG | GTF_CALL,
GTF_SIDE_EFFECT = GTF_PERSISTENT_SIDE_EFFECTS | GTF_EXCEPT,
GTF_GLOB_EFFECT = GTF_SIDE_EFFECT | GTF_GLOB_REF,
GTF_ALL_EFFECT = GTF_GLOB_EFFECT | GTF_ORDER_SIDEEFF,
GTF_REVERSE_OPS = 0x00000020, // operand op2 should be evaluated before op1 (normally, op1 is evaluated first and op2 is evaluated second)
GTF_CONTAINED = 0x00000040, // This node is contained (executed as part of its parent)
GTF_SPILLED = 0x00000080, // the value has been spilled
GTF_NOREG_AT_USE = 0x00000100, // tree node is in memory at the point of use
GTF_SET_FLAGS = 0x00000200, // Requires that codegen for this node set the flags. Use gtSetFlags() to check this flag.
GTF_USE_FLAGS = 0x00000400, // Indicates that this node uses the flags bits.
GTF_MAKE_CSE = 0x00000800, // Hoisted expression: try hard to make this into CSE (see optPerformHoistExpr)
GTF_DONT_CSE = 0x00001000, // Don't bother CSE'ing this expr
GTF_COLON_COND = 0x00002000, // This node is conditionally executed (part of ? :)
GTF_NODE_MASK = GTF_COLON_COND,
GTF_BOOLEAN = 0x00004000, // value is known to be 0/1
GTF_UNSIGNED = 0x00008000, // With GT_CAST: the source operand is an unsigned type
// With operators: the specified node is an unsigned operator
GTF_LATE_ARG = 0x00010000, // The specified node is evaluated to a temp in the arg list, and this temp is added to gtCallLateArgs.
GTF_SPILL = 0x00020000, // Needs to be spilled here
// The extra flag GTF_IS_IN_CSE is used to tell the consumer of the side effect flags
// that we are calling in the context of performing a CSE, thus we
// should allow the run-once side effects of running a class constructor.
//
// The only requirement of this flag is that it not overlap any of the
// side-effect flags. The actual bit used is otherwise arbitrary.
GTF_IS_IN_CSE = GTF_BOOLEAN,
GTF_COMMON_MASK = 0x0003FFFF, // mask of all the flags above
GTF_REUSE_REG_VAL = 0x00800000, // This is set by the register allocator on nodes whose value already exists in the
// register assigned to this node, so the code generator does not have to generate
// code to produce the value. It is currently used only on constant nodes.
// It CANNOT be set on var (GT_LCL*) nodes, or on indir (GT_IND or GT_STOREIND) nodes, since
// it is not needed for lclVars and is highly unlikely to be useful for indir nodes.
//---------------------------------------------------------------------
// The following flags can be used only with a small set of nodes, and
// thus their values need not be distinct (other than within the set
// that goes with a particular node/nodes, of course). That is, one can
// only test for one of these flags if the 'gtOper' value is tested as
// well to make sure it's the right operator for the particular flag.
//---------------------------------------------------------------------
// NB: GTF_VAR_* and GTF_REG_* share the same namespace of flags.
// These flags are also used by GT_LCL_FLD, and the last-use (DEATH) flags are also used by GenTreeCopyOrReload.
GTF_VAR_DEF = 0x80000000, // GT_LCL_VAR -- this is a definition
GTF_VAR_USEASG = 0x40000000, // GT_LCL_VAR -- this is a partial definition, a use of the previous definition is implied
// A partial definition usually occurs when a struct field is assigned to (s.f = ...) or
// when a scalar typed variable is assigned to via a narrow store (*((byte*)&i) = ...).
// Last-use bits.
// Note that a node marked GTF_VAR_MULTIREG can only be a pure definition of all the fields, or a pure use of all the fields,
// so we don't need the equivalent of GTF_VAR_USEASG.
GTF_VAR_MULTIREG_DEATH0 = 0x04000000, // GT_LCL_VAR -- The last-use bit for a lclVar (the first register if it is multireg).
GTF_VAR_DEATH = GTF_VAR_MULTIREG_DEATH0,
GTF_VAR_MULTIREG_DEATH1 = 0x08000000, // GT_LCL_VAR -- The last-use bit for the second register of a multireg lclVar.
GTF_VAR_MULTIREG_DEATH2 = 0x10000000, // GT_LCL_VAR -- The last-use bit for the third register of a multireg lclVar.
GTF_VAR_MULTIREG_DEATH3 = 0x20000000, // GT_LCL_VAR -- The last-use bit for the fourth register of a multireg lclVar.
GTF_VAR_DEATH_MASK = GTF_VAR_MULTIREG_DEATH0 | GTF_VAR_MULTIREG_DEATH1 | GTF_VAR_MULTIREG_DEATH2 | GTF_VAR_MULTIREG_DEATH3,
// This is the amount we have to shift, plus the regIndex, to get the last use bit we want.
#define MULTIREG_LAST_USE_SHIFT 26
GTF_VAR_MULTIREG = 0x02000000, // This is a struct or (on 32-bit platforms) long variable that is used or defined
// to/from a multireg source or destination (e.g. a call arg or return, or an op
// that returns its result in multiple registers such as a long multiply).
GTF_LIVENESS_MASK = GTF_VAR_DEF | GTF_VAR_USEASG | GTF_VAR_DEATH_MASK,
GTF_VAR_CAST = 0x01000000, // GT_LCL_VAR -- has been explicitly cast (variable node may not be type of local)
GTF_VAR_ITERATOR = 0x00800000, // GT_LCL_VAR -- this is an iterator reference in the loop condition
GTF_VAR_CLONED = 0x00400000, // GT_LCL_VAR -- this node has been cloned or is a clone
GTF_VAR_CONTEXT = 0x00200000, // GT_LCL_VAR -- this node is part of a runtime lookup
GTF_VAR_FOLDED_IND = 0x00100000, // GT_LCL_VAR -- this node was folded from *(typ*)&lclVar expression tree in fgMorphSmpOp()
// where 'typ' is a small type and 'lclVar' corresponds to a normalized-on-store local variable.
// This flag identifies such nodes in order to make sure that fgDoNormalizeOnStore() is called
// on their parents in post-order morph.
// Relevant for inlining optimizations (see fgInlinePrependStatements)
GTF_VAR_ARR_INDEX = 0x00000020, // The variable is part of (the index portion of) an array index expression.
// Shares a value with GTF_REVERSE_OPS, which is meaningless for local var.
// For additional flags for GT_CALL node see GTF_CALL_M_*
GTF_CALL_UNMANAGED = 0x80000000, // GT_CALL -- direct call to unmanaged code
GTF_CALL_INLINE_CANDIDATE = 0x40000000, // GT_CALL -- this call has been marked as an inline candidate
GTF_CALL_VIRT_KIND_MASK = 0x30000000, // GT_CALL -- mask of the below call kinds
GTF_CALL_NONVIRT = 0x00000000, // GT_CALL -- a non virtual call
GTF_CALL_VIRT_STUB = 0x10000000, // GT_CALL -- a stub-dispatch virtual call
GTF_CALL_VIRT_VTABLE = 0x20000000, // GT_CALL -- a vtable-based virtual call
GTF_CALL_NULLCHECK = 0x08000000, // GT_CALL -- must check instance pointer for null
GTF_CALL_POP_ARGS = 0x04000000, // GT_CALL -- caller pop arguments?
GTF_CALL_HOISTABLE = 0x02000000, // GT_CALL -- call is hoistable
GTF_MEMORYBARRIER_LOAD = 0x40000000, // GT_MEMORYBARRIER -- Load barrier
GTF_FLD_VOLATILE = 0x40000000, // GT_FIELD/GT_CLS_VAR -- same as GTF_IND_VOLATILE
GTF_FLD_INITCLASS = 0x20000000, // GT_FIELD/GT_CLS_VAR -- field access requires preceding class/static init helper
GTF_INX_RNGCHK = 0x80000000, // GT_INDEX/GT_INDEX_ADDR -- the array reference should be range-checked.
GTF_INX_STRING_LAYOUT = 0x40000000, // GT_INDEX -- this uses the special string array layout
GTF_INX_NOFAULT = 0x20000000, // GT_INDEX -- the INDEX does not throw an exception (morph to GTF_IND_NONFAULTING)
GTF_IND_TGT_NOT_HEAP = 0x80000000, // GT_IND -- the target is not on the heap
GTF_IND_VOLATILE = 0x40000000, // GT_IND -- the load or store must use volatile semantics (this is a nop on X86)
GTF_IND_NONFAULTING = 0x20000000, // Operations for which OperIsIndir() is true -- An indir that cannot fault.
// Same as GTF_ARRLEN_NONFAULTING.
GTF_IND_TGTANYWHERE = 0x10000000, // GT_IND -- the target could be anywhere
GTF_IND_TLS_REF = 0x08000000, // GT_IND -- the target is accessed via TLS
GTF_IND_ASG_LHS = 0x04000000, // GT_IND -- this GT_IND node is (the effective val) of the LHS of an
// assignment; don't evaluate it independently.
GTF_IND_REQ_ADDR_IN_REG = GTF_IND_ASG_LHS, // GT_IND -- requires its addr operand to be evaluated
// into a register. This flag is useful in cases where it
// is required to generate register indirect addressing mode.
// One such case is virtual stub calls on xarch. This is only
// valid in the backend, where GTF_IND_ASG_LHS is not necessary
// (all such indirections will be lowered to GT_STOREIND).
GTF_IND_UNALIGNED = 0x02000000, // GT_IND -- the load or store is unaligned (we assume worst case
// alignment of 1 byte)
GTF_IND_INVARIANT = 0x01000000, // GT_IND -- the target is invariant (a prejit indirection)
GTF_IND_ARR_INDEX = 0x00800000, // GT_IND -- the indirection represents an (SZ) array index
GTF_IND_NONNULL = 0x00400000, // GT_IND -- the indirection never returns null (zero)
GTF_IND_FLAGS = GTF_IND_VOLATILE | GTF_IND_TGTANYWHERE | GTF_IND_NONFAULTING | GTF_IND_TLS_REF | \
GTF_IND_UNALIGNED | GTF_IND_INVARIANT | GTF_IND_NONNULL | GTF_IND_ARR_INDEX | GTF_IND_TGT_NOT_HEAP,
GTF_CLS_VAR_VOLATILE = 0x40000000, // GT_FIELD/GT_CLS_VAR -- same as GTF_IND_VOLATILE
GTF_CLS_VAR_INITCLASS = 0x20000000, // GT_FIELD/GT_CLS_VAR -- same as GTF_FLD_INITCLASS
GTF_CLS_VAR_ASG_LHS = 0x04000000, // GT_CLS_VAR -- this GT_CLS_VAR node is (the effective val) of the LHS
// of an assignment; don't evaluate it independently.
GTF_ADDRMODE_NO_CSE = 0x80000000, // GT_ADD/GT_MUL/GT_LSH -- Do not CSE this node only, forms complex
// addressing mode
GTF_MUL_64RSLT = 0x40000000, // GT_MUL -- produce 64-bit result
GTF_RELOP_NAN_UN = 0x80000000, // GT_<relop> -- Is branch taken if ops are NaN?
GTF_RELOP_JMP_USED = 0x40000000, // GT_<relop> -- result of compare used for jump or ?:
GTF_RELOP_ZTT = 0x08000000, // GT_<relop> -- Loop test cloned for converting while-loops into do-while
// with explicit "loop test" in the header block.
GTF_RELOP_SJUMP_OPT = 0x04000000, // GT_<relop> -- Swap signed jl/jge with js/jns during emitter, reuses flags
// from previous instruction.
GTF_JCMP_EQ = 0x80000000, // GTF_JCMP_EQ -- Branch on equal rather than not equal
GTF_JCMP_TST = 0x40000000, // GTF_JCMP_TST -- Use bit test instruction rather than compare against zero instruction
GTF_RET_MERGED = 0x80000000, // GT_RETURN -- This is a return generated during epilog merging.
GTF_QMARK_CAST_INSTOF = 0x80000000, // GT_QMARK -- Is this a top (not nested) level qmark created for
// castclass or instanceof?
GTF_BOX_VALUE = 0x80000000, // GT_BOX -- "box" is on a value type
GTF_ICON_HDL_MASK = 0xFF000000, // Bits used by handle types below
GTF_ICON_SCOPE_HDL = 0x01000000, // GT_CNS_INT -- constant is a scope handle
GTF_ICON_CLASS_HDL = 0x02000000, // GT_CNS_INT -- constant is a class handle
GTF_ICON_METHOD_HDL = 0x03000000, // GT_CNS_INT -- constant is a method handle
GTF_ICON_FIELD_HDL = 0x04000000, // GT_CNS_INT -- constant is a field handle
GTF_ICON_STATIC_HDL = 0x05000000, // GT_CNS_INT -- constant is a handle to static data
GTF_ICON_STR_HDL = 0x06000000, // GT_CNS_INT -- constant is a string handle
GTF_ICON_CONST_PTR = 0x07000000, // GT_CNS_INT -- constant is a pointer to immutable data, (e.g. IAT_PPVALUE)
GTF_ICON_GLOBAL_PTR = 0x08000000, // GT_CNS_INT -- constant is a pointer to mutable data (e.g. from the VM state)
GTF_ICON_VARG_HDL = 0x09000000, // GT_CNS_INT -- constant is a var arg cookie handle
GTF_ICON_PINVKI_HDL = 0x0A000000, // GT_CNS_INT -- constant is a pinvoke calli handle
GTF_ICON_TOKEN_HDL = 0x0B000000, // GT_CNS_INT -- constant is a token handle (other than class, method or field)
GTF_ICON_TLS_HDL = 0x0C000000, // GT_CNS_INT -- constant is a TLS ref with offset
GTF_ICON_FTN_ADDR = 0x0D000000, // GT_CNS_INT -- constant is a function address
GTF_ICON_CIDMID_HDL = 0x0E000000, // GT_CNS_INT -- constant is a class ID or a module ID
GTF_ICON_BBC_PTR = 0x0F000000, // GT_CNS_INT -- constant is a basic block count pointer
GTF_ICON_STATIC_BOX_PTR = 0x10000000, // GT_CNS_INT -- constant is an address of the box for a STATIC_IN_HEAP field
// GTF_ICON_REUSE_REG_VAL = 0x00800000 // GT_CNS_INT -- GTF_REUSE_REG_VAL, defined above
GTF_ICON_FIELD_OFF = 0x00400000, // GT_CNS_INT -- constant is a field offset
GTF_ICON_SIMD_COUNT = 0x00200000, // GT_CNS_INT -- constant is Vector<T>.Count
GTF_ICON_INITCLASS = 0x00100000, // GT_CNS_INT -- Constant is used to access a static that requires preceding
// class/static init helper. In some cases, the constant is
// the address of the static field itself, and in other cases
// there's an extra layer of indirection and it is the address
// of the cell that the runtime will fill in with the address
// of the static field; in both of those cases, the constant
// is what gets flagged.
GTF_BLK_VOLATILE = GTF_IND_VOLATILE, // GT_ASG, GT_STORE_BLK, GT_STORE_OBJ, GT_STORE_DYN_BLK -- is a volatile block operation
GTF_BLK_UNALIGNED = GTF_IND_UNALIGNED, // GT_ASG, GT_STORE_BLK, GT_STORE_OBJ, GT_STORE_DYN_BLK -- is an unaligned block operation
GTF_OVERFLOW = 0x10000000, // Supported for: GT_ADD, GT_SUB, GT_MUL and GT_CAST.
// Requires an overflow check. Use gtOverflow(Ex)() to check this flag.
GTF_DIV_BY_CNS_OPT = 0x80000000, // GT_DIV -- Uses the division by constant optimization to compute this division
GTF_CHK_INDEX_INBND = 0x80000000, // GT_BOUNDS_CHECK -- have proved this check is always in-bounds
GTF_ARRLEN_ARR_IDX = 0x80000000, // GT_ARR_LENGTH -- Length which feeds into an array index expression
GTF_ARRLEN_NONFAULTING = 0x20000000, // GT_ARR_LENGTH -- An array length operation that cannot fault. Same as GT_IND_NONFAULTING.
GTF_SIMDASHW_OP = 0x80000000, // GT_HWINTRINSIC -- Indicates that the structHandle should be gotten from gtGetStructHandleForSIMD
// rather than from gtGetStructHandleForHWSIMD.
// Flag used by assertion prop to indicate that a type is a TYP_LONG
#ifdef TARGET_64BIT
GTF_ASSERTION_PROP_LONG = 0x00000001,
#endif // TARGET_64BIT
};
inline constexpr GenTreeFlags operator ~(GenTreeFlags a)
{
return (GenTreeFlags)(~(unsigned int)a);
}
inline constexpr GenTreeFlags operator |(GenTreeFlags a, GenTreeFlags b)
{
return (GenTreeFlags)((unsigned int)a | (unsigned int)b);
}
inline constexpr GenTreeFlags operator &(GenTreeFlags a, GenTreeFlags b)
{
return (GenTreeFlags)((unsigned int)a & (unsigned int)b);
}
inline GenTreeFlags& operator |=(GenTreeFlags& a, GenTreeFlags b)
{
return a = (GenTreeFlags)((unsigned int)a | (unsigned int)b);
}
inline GenTreeFlags& operator &=(GenTreeFlags& a, GenTreeFlags b)
{
return a = (GenTreeFlags)((unsigned int)a & (unsigned int)b);
}
inline GenTreeFlags& operator ^=(GenTreeFlags& a, GenTreeFlags b)
{
return a = (GenTreeFlags)((unsigned int)a ^ (unsigned int)b);
}
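// These overloads let GenTreeFlags values be combined and masked without casts. For example (an
// illustrative sketch):
//
//     GenTreeFlags flags = GTF_VAR_DEF | GTF_VAR_USEASG;
//     flags &= ~GTF_VAR_USEASG;
//     bool isDef = (flags & GTF_VAR_DEF) != 0;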
// Can any side-effects be observed externally, say by a caller method?
// For assignments, only assignments to global memory can be observed
// externally, whereas simple assignments to local variables can not.
//
// Be careful when using this inside a "try" protected region as the
// order of assignments to local variables would need to be preserved
// wrt side effects if the variables are alive on entry to the
// "catch/finally" region. In such cases, even assignments to locals
// will have to be restricted.
#define GTF_GLOBALLY_VISIBLE_SIDE_EFFECTS(flags) \
(((flags) & (GTF_CALL | GTF_EXCEPT)) || (((flags) & (GTF_ASG | GTF_GLOB_REF)) == (GTF_ASG | GTF_GLOB_REF)))
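// For example, a store to a local carries only GTF_ASG and is not considered globally visible, while a
// store to global memory carries GTF_ASG | GTF_GLOB_REF and is, as is any tree flagged with GTF_CALL or
// GTF_EXCEPT.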
#if defined(DEBUG)
//------------------------------------------------------------------------
// GenTreeDebugFlags: a bitmask of debug-only flags for GenTree stored in gtDebugFlags
//
enum GenTreeDebugFlags : unsigned int
{
GTF_DEBUG_NONE = 0x00000000, // No debug flags.
GTF_DEBUG_NODE_MORPHED = 0x00000001, // the node has been morphed (in the global morphing phase)
GTF_DEBUG_NODE_SMALL = 0x00000002,
GTF_DEBUG_NODE_LARGE = 0x00000004,
GTF_DEBUG_NODE_CG_PRODUCED = 0x00000008, // genProduceReg has been called on this node
GTF_DEBUG_NODE_CG_CONSUMED = 0x00000010, // genConsumeReg has been called on this node
GTF_DEBUG_NODE_LSRA_ADDED = 0x00000020, // This node was added by LSRA
GTF_DEBUG_NODE_MASK = 0x0000003F, // These flags are all node (rather than operation) properties.
GTF_DEBUG_VAR_CSE_REF = 0x00800000, // GT_LCL_VAR -- This is a CSE LCL_VAR node
};
inline constexpr GenTreeDebugFlags operator ~(GenTreeDebugFlags a)
{
return (GenTreeDebugFlags)(~(unsigned int)a);
}
inline constexpr GenTreeDebugFlags operator |(GenTreeDebugFlags a, GenTreeDebugFlags b)
{
return (GenTreeDebugFlags)((unsigned int)a | (unsigned int)b);
}
inline constexpr GenTreeDebugFlags operator &(GenTreeDebugFlags a, GenTreeDebugFlags b)
{
return (GenTreeDebugFlags)((unsigned int)a & (unsigned int)b);
}
inline GenTreeDebugFlags& operator |=(GenTreeDebugFlags& a, GenTreeDebugFlags b)
{
return a = (GenTreeDebugFlags)((unsigned int)a | (unsigned int)b);
}
inline GenTreeDebugFlags& operator &=(GenTreeDebugFlags& a, GenTreeDebugFlags b)
{
return a = (GenTreeDebugFlags)((unsigned int)a & (unsigned int)b);
}
#endif // defined(DEBUG)
// clang-format on
constexpr bool OpersAreContiguous(genTreeOps firstOper, genTreeOps secondOper)
{
return (firstOper + 1) == secondOper;
}
template <typename... Opers>
constexpr bool OpersAreContiguous(genTreeOps firstOper, genTreeOps secondOper, Opers... otherOpers)
{
return OpersAreContiguous(firstOper, secondOper) && OpersAreContiguous(secondOper, otherOpers...);
}
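// For example, OpersAreContiguous(GT_EQ, GT_NE, GT_LT) folds to ((GT_EQ + 1) == GT_NE) && ((GT_NE + 1) == GT_LT);
// the static asserts in the OperIs* range checks below use this to verify that the opers they rely on stay adjacent.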
#ifndef HOST_64BIT
#include <pshpack4.h>
#endif
struct GenTree
{
// We use GT_STRUCT_0 only for the category of simple ops.
#define GTSTRUCT_0(fn, en) \
GenTree##fn* As##fn() \
{ \
assert(OperIsSimple()); \
return reinterpret_cast<GenTree##fn*>(this); \
} \
const GenTree##fn* As##fn() const \
{ \
assert(OperIsSimple()); \
return reinterpret_cast<const GenTree##fn*>(this); \
} \
GenTree##fn& As##fn##Ref() \
{ \
return *As##fn(); \
}
#define GTSTRUCT_N(fn, ...) \
GenTree##fn* As##fn() \
{ \
assert(OperIs(__VA_ARGS__)); \
return reinterpret_cast<GenTree##fn*>(this); \
} \
const GenTree##fn* As##fn() const \
{ \
assert(OperIs(__VA_ARGS__)); \
return reinterpret_cast<const GenTree##fn*>(this); \
} \
GenTree##fn& As##fn##Ref() \
{ \
return *As##fn(); \
}
#define GTSTRUCT_1(fn, en) GTSTRUCT_N(fn, en)
#define GTSTRUCT_2(fn, en, en2) GTSTRUCT_N(fn, en, en2)
#define GTSTRUCT_3(fn, en, en2, en3) GTSTRUCT_N(fn, en, en2, en3)
#define GTSTRUCT_4(fn, en, en2, en3, en4) GTSTRUCT_N(fn, en, en2, en3, en4)
#define GTSTRUCT_2_SPECIAL(fn, en, en2) GTSTRUCT_2(fn, en, en2)
#define GTSTRUCT_3_SPECIAL(fn, en, en2, en3) GTSTRUCT_3(fn, en, en2, en3)
#include "gtstructs.h"
genTreeOps gtOper; // enum subtype BYTE
var_types gtType; // enum subtype BYTE
genTreeOps OperGet() const
{
return gtOper;
}
var_types TypeGet() const
{
return gtType;
}
#ifdef DEBUG
genTreeOps gtOperSave; // Only used to save gtOper when we destroy a node, to aid debugging.
#endif
#define NO_CSE (0)
#define IS_CSE_INDEX(x) ((x) != 0)
#define IS_CSE_USE(x) ((x) > 0)
#define IS_CSE_DEF(x) ((x) < 0)
#define GET_CSE_INDEX(x) (((x) > 0) ? x : -(x))
#define TO_CSE_DEF(x) (-(x))
signed char gtCSEnum; // 0 or the CSE index (negated if def)
// valid only for CSE expressions
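// For example, gtCSEnum == 2 marks this node as a use of CSE candidate #2, while gtCSEnum == -2 marks it
// as the def; GET_CSE_INDEX recovers 2 in both cases.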
unsigned char gtLIRFlags; // Used for nodes that are in LIR. See LIR::Flags in lir.h for the various flags.
AssertionInfo gtAssertionInfo;
bool GeneratesAssertion() const
{
return gtAssertionInfo.HasAssertion();
}
void ClearAssertion()
{
gtAssertionInfo.Clear();
}
AssertionInfo GetAssertionInfo() const
{
return gtAssertionInfo;
}
void SetAssertionInfo(AssertionInfo info)
{
gtAssertionInfo = info;
}
//
// Cost metrics on the node. Don't allow direct access to the variable for setting.
//
public:
#ifdef DEBUG
// You are not allowed to read the cost values before they have been set in gtSetEvalOrder().
// Keep track of whether the costs have been initialized, and assert if they are read before being initialized.
// Obviously, this information does need to be initialized when a node is created.
// This is public so the dumpers can see it.
bool gtCostsInitialized;
#endif // DEBUG
#define MAX_COST UCHAR_MAX
#define IND_COST_EX 3 // execution cost for an indirection
unsigned char GetCostEx() const
{
assert(gtCostsInitialized);
return _gtCostEx;
}
unsigned char GetCostSz() const
{
assert(gtCostsInitialized);
return _gtCostSz;
}
// Set the costs. They are always both set at the same time.
// Don't use the "put" property: force calling this function, to make it more obvious in the few places
// that set the values.
// Note that costs are only set in gtSetEvalOrder() and its callees.
void SetCosts(unsigned costEx, unsigned costSz)
{
assert(costEx != (unsigned)-1); // looks bogus
assert(costSz != (unsigned)-1); // looks bogus
INDEBUG(gtCostsInitialized = true;)
_gtCostEx = (costEx > MAX_COST) ? MAX_COST : (unsigned char)costEx;
_gtCostSz = (costSz > MAX_COST) ? MAX_COST : (unsigned char)costSz;
}
// Optimized copy function, to avoid the SetCosts() function comparisons, and make it more clear that a node copy is
// happening.
void CopyCosts(const GenTree* const tree)
{
// If the 'tree' costs aren't initialized, we'll hit an assert below.
INDEBUG(gtCostsInitialized = tree->gtCostsInitialized;)
_gtCostEx = tree->GetCostEx();
_gtCostSz = tree->GetCostSz();
}
// Same as CopyCosts, but avoids asserts if the costs we are copying have not been initialized.
// This is because the importer, for example, clones nodes before these costs have been initialized.
// Note that we directly access the 'tree' costs, not going through the accessor functions (either
// directly or through the properties).
void CopyRawCosts(const GenTree* const tree)
{
INDEBUG(gtCostsInitialized = tree->gtCostsInitialized;)
_gtCostEx = tree->_gtCostEx;
_gtCostSz = tree->_gtCostSz;
}
private:
unsigned char _gtCostEx; // estimate of expression execution cost
unsigned char _gtCostSz; // estimate of expression code size cost
//
// Register or register pair number of the node.
//
CLANG_FORMAT_COMMENT_ANCHOR;
#ifdef DEBUG
public:
enum genRegTag
{
GT_REGTAG_NONE, // Nothing has been assigned to _gtRegNum
GT_REGTAG_REG // _gtRegNum has been assigned
};
genRegTag GetRegTag() const
{
assert(gtRegTag == GT_REGTAG_NONE || gtRegTag == GT_REGTAG_REG);
return gtRegTag;
}
private:
genRegTag gtRegTag; // What is in _gtRegNum?
#endif // DEBUG
private:
// This stores the register assigned to the node. If a register is not assigned, _gtRegNum is set to REG_NA.
regNumberSmall _gtRegNum;
// Count of operands. Used *only* by GenTreeMultiOp, exists solely due to padding constraints.
friend struct GenTreeMultiOp;
uint8_t m_operandCount;
public:
// The register number is stored in a small format (8 bits), but the getters return and the setters take
// a full-size (unsigned) format, to localize the casts here.
CLANG_FORMAT_COMMENT_ANCHOR;
#ifdef DEBUG
bool canBeContained() const;
#endif
// for codegen purposes, is this node a subnode of its parent
bool isContained() const;
bool isContainedIndir() const;
bool isIndirAddrMode();
// This returns true only for GT_IND and GT_STOREIND, and is used in contexts where a "true"
// indirection is expected (i.e. either a load to or a store from a single register).
// OperIsIndir() returns true also for indirection nodes such as GT_BLK, etc. as well as GT_NULLCHECK.
bool isIndir() const;
bool isContainedIntOrIImmed() const
{
return isContained() && IsCnsIntOrI() && !isUsedFromSpillTemp();
}
bool isContainedFltOrDblImmed() const
{
return isContained() && (OperGet() == GT_CNS_DBL);
}
bool isLclField() const
{
return OperGet() == GT_LCL_FLD || OperGet() == GT_STORE_LCL_FLD;
}
bool isUsedFromSpillTemp() const;
// Indicates whether it is a memory op.
// Right now it includes Indir and LclField ops.
bool isMemoryOp() const
{
return isIndir() || isLclField();
}
bool isUsedFromMemory() const
{
return ((isContained() && (isMemoryOp() || (OperGet() == GT_LCL_VAR) || (OperGet() == GT_CNS_DBL))) ||
isUsedFromSpillTemp());
}
bool isLclVarUsedFromMemory() const
{
return (OperGet() == GT_LCL_VAR) && (isContained() || isUsedFromSpillTemp());
}
bool isLclFldUsedFromMemory() const
{
return isLclField() && (isContained() || isUsedFromSpillTemp());
}
bool isUsedFromReg() const
{
return !isContained() && !isUsedFromSpillTemp();
}
regNumber GetRegNum() const
{
assert((gtRegTag == GT_REGTAG_REG) || (gtRegTag == GT_REGTAG_NONE)); // TODO-Cleanup: get rid of the NONE case,
// and fix everyplace that reads undefined
// values
regNumber reg = (regNumber)_gtRegNum;
assert((gtRegTag == GT_REGTAG_NONE) || // TODO-Cleanup: get rid of the NONE case, and fix everyplace that reads
// undefined values
(reg >= REG_FIRST && reg <= REG_COUNT));
return reg;
}
void SetRegNum(regNumber reg)
{
assert(reg >= REG_FIRST && reg <= REG_COUNT);
_gtRegNum = (regNumberSmall)reg;
INDEBUG(gtRegTag = GT_REGTAG_REG;)
assert(_gtRegNum == reg);
}
void ClearRegNum()
{
_gtRegNum = REG_NA;
INDEBUG(gtRegTag = GT_REGTAG_NONE;)
}
// Copy the _gtRegNum/gtRegTag fields
void CopyReg(GenTree* from);
bool gtHasReg(Compiler* comp) const;
int GetRegisterDstCount(Compiler* compiler) const;
regMaskTP gtGetRegMask() const;
GenTreeFlags gtFlags;
#if defined(DEBUG)
GenTreeDebugFlags gtDebugFlags;
#endif // defined(DEBUG)
ValueNumPair gtVNPair;
regMaskSmall gtRsvdRegs; // set of fixed trashed registers
unsigned AvailableTempRegCount(regMaskTP mask = (regMaskTP)-1) const;
regNumber GetSingleTempReg(regMaskTP mask = (regMaskTP)-1);
regNumber ExtractTempReg(regMaskTP mask = (regMaskTP)-1);
void SetVNsFromNode(GenTree* tree)
{
gtVNPair = tree->gtVNPair;
}
ValueNum GetVN(ValueNumKind vnk) const
{
if (vnk == VNK_Liberal)
{
return gtVNPair.GetLiberal();
}
else
{
assert(vnk == VNK_Conservative);
return gtVNPair.GetConservative();
}
}
void SetVN(ValueNumKind vnk, ValueNum vn)
{
if (vnk == VNK_Liberal)
{
return gtVNPair.SetLiberal(vn);
}
else
{
assert(vnk == VNK_Conservative);
return gtVNPair.SetConservative(vn);
}
}
void SetVNs(ValueNumPair vnp)
{
gtVNPair = vnp;
}
void ClearVN()
{
gtVNPair = ValueNumPair(); // Initializes both elements to "NoVN".
}
GenTree* gtNext;
GenTree* gtPrev;
#ifdef DEBUG
unsigned gtTreeID;
unsigned gtSeqNum; // liveness traversal order within the current statement
int gtUseNum; // use-ordered traversal within the function
#endif
static const unsigned char gtOperKindTable[];
static unsigned OperKind(unsigned gtOper)
{
assert(gtOper < GT_COUNT);
return gtOperKindTable[gtOper];
}
unsigned OperKind() const
{
assert(gtOper < GT_COUNT);
return gtOperKindTable[gtOper];
}
static bool IsExOp(unsigned opKind)
{
return (opKind & GTK_EXOP) != 0;
}
bool IsValue() const
{
if ((OperKind(gtOper) & GTK_NOVALUE) != 0)
{
return false;
}
if (gtType == TYP_VOID)
{
// These are the only operators which can produce either VOID or non-VOID results.
assert(OperIs(GT_NOP, GT_CALL, GT_COMMA) || OperIsCompare() || OperIsLong() || OperIsSIMD() ||
OperIsHWIntrinsic());
return false;
}
return true;
}
// LIR flags
// These helper methods, along with the flag values they manipulate, are defined in lir.h
//
// UnusedValue indicates that, although this node produces a value, it is unused.
inline void SetUnusedValue();
inline void ClearUnusedValue();
inline bool IsUnusedValue() const;
// RegOptional indicates that codegen can still generate code even if it isn't allocated a register.
inline bool IsRegOptional() const;
inline void SetRegOptional();
inline void ClearRegOptional();
#ifdef DEBUG
void dumpLIRFlags();
#endif
bool TypeIs(var_types type) const
{
return gtType == type;
}
template <typename... T>
bool TypeIs(var_types type, T... rest) const
{
return TypeIs(type) || TypeIs(rest...);
}
static bool StaticOperIs(genTreeOps operCompare, genTreeOps oper)
{
return operCompare == oper;
}
template <typename... T>
static bool StaticOperIs(genTreeOps operCompare, genTreeOps oper, T... rest)
{
return StaticOperIs(operCompare, oper) || StaticOperIs(operCompare, rest...);
}
bool OperIs(genTreeOps oper) const
{
return OperGet() == oper;
}
template <typename... T>
bool OperIs(genTreeOps oper, T... rest) const
{
return OperIs(oper) || OperIs(rest...);
}
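// For example, node->OperIs(GT_ADD, GT_SUB, GT_MUL) expands to a short chain of equality checks against gtOper,
// and TypeIs above works the same way for gtType.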
static bool OperIsConst(genTreeOps gtOper)
{
static_assert_no_msg(OpersAreContiguous(GT_CNS_INT, GT_CNS_LNG, GT_CNS_DBL, GT_CNS_STR));
return (GT_CNS_INT <= gtOper) && (gtOper <= GT_CNS_STR);
}
bool OperIsConst() const
{
return OperIsConst(gtOper);
}
static bool OperIsLeaf(genTreeOps gtOper)
{
return (OperKind(gtOper) & GTK_LEAF) != 0;
}
bool OperIsLeaf() const
{
return (OperKind(gtOper) & GTK_LEAF) != 0;
}
static bool OperIsLocal(genTreeOps gtOper)
{
static_assert_no_msg(
OpersAreContiguous(GT_PHI_ARG, GT_LCL_VAR, GT_LCL_FLD, GT_STORE_LCL_VAR, GT_STORE_LCL_FLD));
return (GT_PHI_ARG <= gtOper) && (gtOper <= GT_STORE_LCL_FLD);
}
static bool OperIsLocalAddr(genTreeOps gtOper)
{
return (gtOper == GT_LCL_VAR_ADDR || gtOper == GT_LCL_FLD_ADDR);
}
static bool OperIsLocalField(genTreeOps gtOper)
{
return (gtOper == GT_LCL_FLD || gtOper == GT_LCL_FLD_ADDR || gtOper == GT_STORE_LCL_FLD);
}
inline bool OperIsLocalField() const
{
return OperIsLocalField(gtOper);
}
static bool OperIsScalarLocal(genTreeOps gtOper)
{
return (gtOper == GT_LCL_VAR || gtOper == GT_STORE_LCL_VAR);
}
static bool OperIsNonPhiLocal(genTreeOps gtOper)
{
return OperIsLocal(gtOper) && (gtOper != GT_PHI_ARG);
}
static bool OperIsLocalRead(genTreeOps gtOper)
{
return (OperIsLocal(gtOper) && !OperIsLocalStore(gtOper));
}
static bool OperIsLocalStore(genTreeOps gtOper)
{
return (gtOper == GT_STORE_LCL_VAR || gtOper == GT_STORE_LCL_FLD);
}
static bool OperIsAddrMode(genTreeOps gtOper)
{
return (gtOper == GT_LEA);
}
static bool OperIsInitVal(genTreeOps gtOper)
{
return (gtOper == GT_INIT_VAL);
}
bool OperIsInitVal() const
{
return OperIsInitVal(OperGet());
}
bool IsConstInitVal() const
{
return (gtOper == GT_CNS_INT) || (OperIsInitVal() && (gtGetOp1()->gtOper == GT_CNS_INT));
}
bool OperIsBlkOp();
bool OperIsCopyBlkOp();
bool OperIsInitBlkOp();
static bool OperIsBlk(genTreeOps gtOper)
{
return (gtOper == GT_BLK) || (gtOper == GT_OBJ) || OperIsStoreBlk(gtOper);
}
bool OperIsBlk() const
{
return OperIsBlk(OperGet());
}
static bool OperIsStoreBlk(genTreeOps gtOper)
{
return StaticOperIs(gtOper, GT_STORE_BLK, GT_STORE_OBJ, GT_STORE_DYN_BLK);
}
bool OperIsStoreBlk() const
{
return OperIsStoreBlk(OperGet());
}
bool OperIsPutArgSplit() const
{
#if FEATURE_ARG_SPLIT
assert((gtOper != GT_PUTARG_SPLIT) || compFeatureArgSplit());
return gtOper == GT_PUTARG_SPLIT;
#else // !FEATURE_ARG_SPLIT
return false;
#endif
}
bool OperIsPutArgStk() const
{
return gtOper == GT_PUTARG_STK;
}
bool OperIsPutArgStkOrSplit() const
{
return OperIsPutArgStk() || OperIsPutArgSplit();
}
bool OperIsPutArgReg() const
{
return gtOper == GT_PUTARG_REG;
}
bool OperIsPutArg() const
{
return OperIsPutArgStk() || OperIsPutArgReg() || OperIsPutArgSplit();
}
bool OperIsFieldList() const
{
return OperIs(GT_FIELD_LIST);
}
bool OperIsMultiRegOp() const
{
#if !defined(TARGET_64BIT)
if (OperIs(GT_MUL_LONG))
{
return true;
}
#if defined(TARGET_ARM)
if (OperIs(GT_PUTARG_REG, GT_BITCAST))
{
return true;
}
#endif // TARGET_ARM
#endif // TARGET_64BIT
return false;
}
bool OperIsAddrMode() const
{
return OperIsAddrMode(OperGet());
}
bool OperIsLocal() const
{
return OperIsLocal(OperGet());
}
bool OperIsLocalAddr() const
{
return OperIsLocalAddr(OperGet());
}
bool OperIsScalarLocal() const
{
return OperIsScalarLocal(OperGet());
}
bool OperIsNonPhiLocal() const
{
return OperIsNonPhiLocal(OperGet());
}
bool OperIsLocalStore() const
{
return OperIsLocalStore(OperGet());
}
bool OperIsLocalRead() const
{
return OperIsLocalRead(OperGet());
}
static bool OperIsCompare(genTreeOps gtOper)
{
static_assert_no_msg(OpersAreContiguous(GT_EQ, GT_NE, GT_LT, GT_LE, GT_GE, GT_GT, GT_TEST_EQ, GT_TEST_NE));
return (GT_EQ <= gtOper) && (gtOper <= GT_TEST_NE);
}
bool OperIsCompare() const
{
return OperIsCompare(OperGet());
}
static bool OperIsShift(genTreeOps gtOper)
{
return (gtOper == GT_LSH) || (gtOper == GT_RSH) || (gtOper == GT_RSZ);
}
bool OperIsShift() const
{
return OperIsShift(OperGet());
}
static bool OperIsShiftLong(genTreeOps gtOper)
{
#ifdef TARGET_64BIT
return false;
#else
return (gtOper == GT_LSH_HI) || (gtOper == GT_RSH_LO);
#endif
}
bool OperIsShiftLong() const
{
return OperIsShiftLong(OperGet());
}
static bool OperIsRotate(genTreeOps gtOper)
{
return (gtOper == GT_ROL) || (gtOper == GT_ROR);
}
bool OperIsRotate() const
{
return OperIsRotate(OperGet());
}
static bool OperIsShiftOrRotate(genTreeOps gtOper)
{
return OperIsShift(gtOper) || OperIsRotate(gtOper) || OperIsShiftLong(gtOper);
}
bool OperIsShiftOrRotate() const
{
return OperIsShiftOrRotate(OperGet());
}
static bool OperIsMul(genTreeOps gtOper)
{
return (gtOper == GT_MUL) || (gtOper == GT_MULHI)
#if !defined(TARGET_64BIT) || defined(TARGET_ARM64)
|| (gtOper == GT_MUL_LONG)
#endif
;
}
bool OperIsMul() const
{
return OperIsMul(gtOper);
}
bool OperIsArithmetic() const
{
genTreeOps op = OperGet();
return op == GT_ADD || op == GT_SUB || op == GT_MUL || op == GT_DIV || op == GT_MOD
|| op == GT_UDIV || op == GT_UMOD
|| op == GT_OR || op == GT_XOR || op == GT_AND
|| OperIsShiftOrRotate(op);
}
#ifdef TARGET_XARCH
static bool OperIsRMWMemOp(genTreeOps gtOper)
{
// Return if binary op is one of the supported operations for RMW of memory.
return (gtOper == GT_ADD || gtOper == GT_SUB || gtOper == GT_AND || gtOper == GT_OR || gtOper == GT_XOR ||
gtOper == GT_NOT || gtOper == GT_NEG || OperIsShiftOrRotate(gtOper));
}
bool OperIsRMWMemOp() const
{
// Return if binary op is one of the supported operations for RMW of memory.
return OperIsRMWMemOp(gtOper);
}
#endif // TARGET_XARCH
static bool OperIsUnary(genTreeOps gtOper)
{
return (OperKind(gtOper) & GTK_UNOP) != 0;
}
bool OperIsUnary() const
{
return OperIsUnary(gtOper);
}
static bool OperIsBinary(genTreeOps gtOper)
{
return (OperKind(gtOper) & GTK_BINOP) != 0;
}
bool OperIsBinary() const
{
return OperIsBinary(gtOper);
}
static bool OperIsSimple(genTreeOps gtOper)
{
return (OperKind(gtOper) & GTK_SMPOP) != 0;
}
static bool OperIsSpecial(genTreeOps gtOper)
{
return ((OperKind(gtOper) & GTK_KINDMASK) == GTK_SPECIAL);
}
bool OperIsSimple() const
{
return OperIsSimple(gtOper);
}
#ifdef FEATURE_SIMD
bool isCommutativeSIMDIntrinsic();
#else // !FEATURE_SIMD
bool isCommutativeSIMDIntrinsic()
{
return false;
}
#endif // FEATURE_SIMD
#ifdef FEATURE_HW_INTRINSICS
bool isCommutativeHWIntrinsic() const;
bool isContainableHWIntrinsic() const;
bool isRMWHWIntrinsic(Compiler* comp);
#else
bool isCommutativeHWIntrinsic() const
{
return false;
}
bool isContainableHWIntrinsic() const
{
return false;
}
bool isRMWHWIntrinsic(Compiler* comp)
{
return false;
}
#endif // FEATURE_HW_INTRINSICS
static bool OperIsCommutative(genTreeOps gtOper)
{
return (OperKind(gtOper) & GTK_COMMUTE) != 0;
}
bool OperIsCommutative()
{
return OperIsCommutative(gtOper) || (OperIsSIMD(gtOper) && isCommutativeSIMDIntrinsic()) ||
(OperIsHWIntrinsic(gtOper) && isCommutativeHWIntrinsic());
}
static bool OperMayOverflow(genTreeOps gtOper)
{
return ((gtOper == GT_ADD) || (gtOper == GT_SUB) || (gtOper == GT_MUL) || (gtOper == GT_CAST)
#if !defined(TARGET_64BIT)
|| (gtOper == GT_ADD_HI) || (gtOper == GT_SUB_HI)
#endif
);
}
bool OperMayOverflow() const
{
return OperMayOverflow(gtOper);
}
// This returns true only for GT_IND and GT_STOREIND, and is used in contexts where a "true"
// indirection is expected (i.e. either a load to or a store from a single register).
// OperIsIndir() returns true also for indirection nodes such as GT_BLK, etc. as well as GT_NULLCHECK.
static bool OperIsIndir(genTreeOps gtOper)
{
return gtOper == GT_IND || gtOper == GT_STOREIND || gtOper == GT_NULLCHECK || OperIsBlk(gtOper);
}
static bool OperIsIndirOrArrLength(genTreeOps gtOper)
{
return OperIsIndir(gtOper) || (gtOper == GT_ARR_LENGTH);
}
bool OperIsIndir() const
{
return OperIsIndir(gtOper);
}
bool OperIsIndirOrArrLength() const
{
return OperIsIndirOrArrLength(gtOper);
}
bool OperIsImplicitIndir() const;
static bool OperIsAtomicOp(genTreeOps gtOper)
{
switch (gtOper)
{
case GT_XADD:
case GT_XORR:
case GT_XAND:
case GT_XCHG:
case GT_LOCKADD:
case GT_CMPXCHG:
return true;
default:
return false;
}
}
bool OperIsAtomicOp() const
{
return OperIsAtomicOp(gtOper);
}
bool OperIsStore() const
{
return OperIsStore(gtOper);
}
static bool OperIsStore(genTreeOps gtOper)
{
return (gtOper == GT_STOREIND || gtOper == GT_STORE_LCL_VAR || gtOper == GT_STORE_LCL_FLD ||
OperIsStoreBlk(gtOper) || OperIsAtomicOp(gtOper));
}
static bool OperIsMultiOp(genTreeOps gtOper)
{
return OperIsSIMD(gtOper) || OperIsHWIntrinsic(gtOper);
}
bool OperIsMultiOp() const
{
return OperIsMultiOp(OperGet());
}
// This is here for cleaner FEATURE_SIMD #ifdefs.
static bool OperIsSIMD(genTreeOps gtOper)
{
#ifdef FEATURE_SIMD
return gtOper == GT_SIMD;
#else // !FEATURE_SIMD
return false;
#endif // !FEATURE_SIMD
}
bool OperIsSIMD() const
{
return OperIsSIMD(gtOper);
}
static bool OperIsHWIntrinsic(genTreeOps gtOper)
{
#ifdef FEATURE_HW_INTRINSICS
return gtOper == GT_HWINTRINSIC;
#else
return false;
#endif // FEATURE_HW_INTRINSICS
}
bool OperIsHWIntrinsic() const
{
return OperIsHWIntrinsic(gtOper);
}
bool OperIsSimdOrHWintrinsic() const
{
return OperIsSIMD() || OperIsHWIntrinsic();
}
// This is here for cleaner GT_LONG #ifdefs.
static bool OperIsLong(genTreeOps gtOper)
{
#if defined(TARGET_64BIT)
return false;
#else
return gtOper == GT_LONG;
#endif
}
bool OperIsLong() const
{
return OperIsLong(gtOper);
}
bool OperIsConditionalJump() const
{
return (gtOper == GT_JTRUE) || (gtOper == GT_JCMP) || (gtOper == GT_JCC);
}
#ifdef DEBUG
static const GenTreeDebugOperKind gtDebugOperKindTable[];
static GenTreeDebugOperKind DebugOperKind(genTreeOps oper)
{
assert(oper < GT_COUNT);
return gtDebugOperKindTable[oper];
}
GenTreeDebugOperKind DebugOperKind() const
{
return DebugOperKind(OperGet());
}
bool NullOp1Legal() const
{
assert(OperIsSimple());
switch (gtOper)
{
case GT_LEA:
case GT_RETFILT:
case GT_NOP:
case GT_FIELD:
return true;
case GT_RETURN:
return gtType == TYP_VOID;
default:
return false;
}
}
bool NullOp2Legal() const
{
assert(OperIsSimple(gtOper) || OperIsBlk(gtOper));
if (!OperIsBinary(gtOper))
{
return true;
}
switch (gtOper)
{
case GT_INTRINSIC:
case GT_LEA:
#if defined(TARGET_ARM)
case GT_PUTARG_REG:
#endif // defined(TARGET_ARM)
return true;
default:
return false;
}
}
bool OperIsLIR() const
{
if (OperIs(GT_NOP))
{
// NOPs may only be present in LIR if they do not produce a value.
return IsNothingNode();
}
return (DebugOperKind() & DBK_NOTLIR) == 0;
}
bool OperSupportsReverseOpEvalOrder(Compiler* comp) const;
static bool RequiresNonNullOp2(genTreeOps oper);
bool IsValidCallArgument();
#endif // DEBUG
inline bool IsFPZero() const;
inline bool IsIntegralConst(ssize_t constVal) const;
inline bool IsIntegralConstVector(ssize_t constVal) const;
inline bool IsSIMDZero() const;
inline bool IsFloatPositiveZero() const;
inline bool IsVectorZero() const;
inline bool IsBoxedValue();
inline GenTree* gtGetOp1() const;
// Directly return op2. Asserts the node is binary. Might return nullptr if the binary node allows
// a nullptr op2, such as GT_LEA. This is more efficient than gtGetOp2IfPresent() if you know what
// node type you have.
inline GenTree* gtGetOp2() const;
// The returned pointer might be nullptr if the node is not binary, or if non-null op2 is not required.
inline GenTree* gtGetOp2IfPresent() const;
bool TryGetUse(GenTree* operand, GenTree*** pUse);
bool TryGetUse(GenTree* operand)
{
GenTree** unusedUse = nullptr;
return TryGetUse(operand, &unusedUse);
}
private:
bool TryGetUseBinOp(GenTree* operand, GenTree*** pUse);
public:
GenTree* gtGetParent(GenTree*** pUse);
void ReplaceOperand(GenTree** useEdge, GenTree* replacement);
inline GenTree* gtEffectiveVal(bool commaOnly = false);
inline GenTree* gtCommaAssignVal();
// Tunnel through any GT_RET_EXPRs
GenTree* gtRetExprVal(BasicBlockFlags* pbbFlags = nullptr);
inline GenTree* gtSkipPutArgType();
// Return the child of this node if it is a GT_RELOAD or GT_COPY; otherwise simply return the node itself
inline GenTree* gtSkipReloadOrCopy();
// Returns true if it is a call node returning its value in more than one register
inline bool IsMultiRegCall() const;
// Returns true if it is a struct lclVar node residing in multiple registers.
inline bool IsMultiRegLclVar() const;
// Returns true if it is a node returning its value in more than one register
bool IsMultiRegNode() const;
// Returns the number of registers defined by a multireg node.
unsigned GetMultiRegCount(Compiler* comp) const;
// Returns the regIndex'th register defined by a possibly-multireg node.
regNumber GetRegByIndex(int regIndex) const;
// Returns the type of the regIndex'th register defined by a multi-reg node.
var_types GetRegTypeByIndex(int regIndex) const;
// Returns the GTF flag equivalent for the regIndex'th register of a multi-reg node.
GenTreeFlags GetRegSpillFlagByIdx(int regIndex) const;
// Last-use information for either GenTreeLclVar or GenTreeCopyOrReload nodes.
private:
GenTreeFlags GetLastUseBit(int regIndex) const;
public:
bool IsLastUse(int regIndex) const;
bool HasLastUse() const;
void SetLastUse(int regIndex);
void ClearLastUse(int regIndex);
// Returns true if it is a GT_COPY or GT_RELOAD node
inline bool IsCopyOrReload() const;
// Returns true if it is a GT_COPY or GT_RELOAD of a multi-reg call node
inline bool IsCopyOrReloadOfMultiRegCall() const;
bool OperRequiresAsgFlag();
bool OperRequiresCallFlag(Compiler* comp);
bool OperMayThrow(Compiler* comp);
unsigned GetScaleIndexMul();
unsigned GetScaleIndexShf();
unsigned GetScaledIndex();
public:
static unsigned char s_gtNodeSizes[];
#if NODEBASH_STATS || MEASURE_NODE_SIZE || COUNT_AST_OPERS
static unsigned char s_gtTrueSizes[];
#endif
#if COUNT_AST_OPERS
static unsigned s_gtNodeCounts[];
#endif
static void InitNodeSize();
size_t GetNodeSize() const;
bool IsNodeProperlySized() const;
void ReplaceWith(GenTree* src, Compiler* comp);
static genTreeOps ReverseRelop(genTreeOps relop);
static genTreeOps SwapRelop(genTreeOps relop);
//---------------------------------------------------------------------
static bool Compare(GenTree* op1, GenTree* op2, bool swapOK = false);
//---------------------------------------------------------------------
#if defined(DEBUG) || NODEBASH_STATS || MEASURE_NODE_SIZE || COUNT_AST_OPERS || DUMP_FLOWGRAPHS
static const char* OpName(genTreeOps op);
#endif
#if MEASURE_NODE_SIZE
static const char* OpStructName(genTreeOps op);
#endif
//---------------------------------------------------------------------
bool IsNothingNode() const;
void gtBashToNOP();
// Value number update action enumeration
enum ValueNumberUpdate
{
CLEAR_VN, // Clear value number
PRESERVE_VN // Preserve value number
};
void SetOper(genTreeOps oper, ValueNumberUpdate vnUpdate = CLEAR_VN); // set gtOper
void SetOperResetFlags(genTreeOps oper); // set gtOper and reset flags
// set gtOper and only keep GTF_COMMON_MASK flags
void ChangeOper(genTreeOps oper, ValueNumberUpdate vnUpdate = CLEAR_VN);
void ChangeOperUnchecked(genTreeOps oper);
void SetOperRaw(genTreeOps oper);
void ChangeType(var_types newType)
{
var_types oldType = gtType;
gtType = newType;
GenTree* node = this;
while (node->gtOper == GT_COMMA)
{
node = node->gtGetOp2();
if (node->gtType != newType)
{
assert(node->gtType == oldType);
node->gtType = newType;
}
}
}
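// For example (an illustrative sketch), retyping COMMA(sideEffect, COMMA(sideEffect2, addr)) from TYP_BYREF
// to TYP_I_IMPL also retypes each nested COMMA, so the chain's type stays consistent with the value it
// ultimately produces.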
template <typename T>
void BashToConst(T value, var_types type = TYP_UNDEF);
void BashToZeroConst(var_types type);
#if NODEBASH_STATS
static void RecordOperBashing(genTreeOps operOld, genTreeOps operNew);
static void ReportOperBashing(FILE* fp);
#else
static void RecordOperBashing(genTreeOps operOld, genTreeOps operNew)
{ /* do nothing */
}
static void ReportOperBashing(FILE* fp)
{ /* do nothing */
}
#endif
bool IsLocal() const
{
return OperIsLocal(OperGet());
}
// Returns "true" iff 'this' is a GT_LCL_FLD or GT_STORE_LCL_FLD on which the type
// is not the same size as the type of the GT_LCL_VAR.
bool IsPartialLclFld(Compiler* comp);
// Returns "true" iff "this" defines a local variable. Requires "comp" to be the
// current compilation. If returns "true", sets "*pLclVarTree" to the
// tree for the local that is defined, and, if "pIsEntire" is non-null, sets "*pIsEntire" to
// true or false, depending on whether the assignment writes to the entirety of the local
// variable, or just a portion of it.
bool DefinesLocal(Compiler* comp, GenTreeLclVarCommon** pLclVarTree, bool* pIsEntire = nullptr);
bool IsLocalAddrExpr(Compiler* comp,
GenTreeLclVarCommon** pLclVarTree,
FieldSeqNode** pFldSeq,
ssize_t* pOffset = nullptr);
// Simpler variant of the above which just returns the local node if this is an expression that
// yields an address into a local
GenTreeLclVarCommon* IsLocalAddrExpr();
// Determine if this tree represents the value of an entire implicit byref parameter,
// and if so return the tree for the parameter.
GenTreeLclVar* IsImplicitByrefParameterValue(Compiler* compiler);
// Determine if this is a LclVarCommon node and return some additional info about it in the
// two out parameters.
bool IsLocalExpr(Compiler* comp, GenTreeLclVarCommon** pLclVarTree, FieldSeqNode** pFldSeq);
// Determine whether this is an assignment tree of the form X = X (op) Y,
// where Y is an arbitrary tree, and X is a lclVar.
unsigned IsLclVarUpdateTree(GenTree** otherTree, genTreeOps* updateOper);
bool IsFieldAddr(Compiler* comp, GenTree** pBaseAddr, FieldSeqNode** pFldSeq);
// Requires "this" to be the address of an array (the child of a GT_IND labeled with GTF_IND_ARR_INDEX).
// Sets "pArr" to the node representing the array (either an array object pointer, or perhaps a byref to the some
// element).
// Sets "*pArrayType" to the class handle for the array type.
// Sets "*inxVN" to the value number inferred for the array index.
// Sets "*pFldSeq" to the sequence, if any, of struct fields used to index into the array element.
void ParseArrayAddress(
Compiler* comp, struct ArrayInfo* arrayInfo, GenTree** pArr, ValueNum* pInxVN, FieldSeqNode** pFldSeq);
// Helper method for the above.
void ParseArrayAddressWork(Compiler* comp,
target_ssize_t inputMul,
GenTree** pArr,
ValueNum* pInxVN,
target_ssize_t* pOffset,
FieldSeqNode** pFldSeq);
// Requires "this" to be a GT_IND. Requires the outermost caller to set "*pFldSeq" to nullptr.
// Returns true if it is an array index expression, or access to a (sequence of) struct field(s)
// within a struct array element. If it returns true, sets *arrayInfo to the array information, and sets *pFldSeq
// to the sequence of struct field accesses.
bool ParseArrayElemForm(Compiler* comp, ArrayInfo* arrayInfo, FieldSeqNode** pFldSeq);
// Requires "this" to be the address of a (possible) array element (or struct field within that).
// If it is, sets "*arrayInfo" to the array access info, "*pFldSeq" to the sequence of struct fields
// accessed within the array element, and returns true. If not, returns "false".
bool ParseArrayElemAddrForm(Compiler* comp, ArrayInfo* arrayInfo, FieldSeqNode** pFldSeq);
// Requires "this" to be an int expression. If it is a sequence of one or more integer constants added together,
// returns true and sets "*pFldSeq" to the sequence of fields with which those constants are annotated.
bool ParseOffsetForm(Compiler* comp, FieldSeqNode** pFldSeq);
// Labels "*this" as an array index expression: label all constants and variables that could contribute, as part of
// an affine expression, to the value of the index.
void LabelIndex(Compiler* comp, bool isConst = true);
// Assumes that "this" occurs in a context where it is being dereferenced as the LHS of an assignment-like
// statement (assignment, initblk, or copyblk). The "width" should be the number of bytes copied by the
// operation. Returns "true" if "this" is an address of (or within)
// a local variable; sets "*pLclVarTree" to that local variable instance; and, if "pIsEntire" is non-null,
// sets "*pIsEntire" to true if this assignment writes the full width of the local.
bool DefinesLocalAddr(Compiler* comp, unsigned width, GenTreeLclVarCommon** pLclVarTree, bool* pIsEntire);
// These are only used for dumping.
// The GetRegNum() is only valid in LIR, but the dumping methods are not easily
// modified to check this.
CLANG_FORMAT_COMMENT_ANCHOR;
#ifdef DEBUG
bool InReg() const
{
return (GetRegTag() != GT_REGTAG_NONE) ? true : false;
}
regNumber GetReg() const
{
return (GetRegTag() != GT_REGTAG_NONE) ? GetRegNum() : REG_NA;
}
#endif
static bool IsContained(unsigned flags)
{
return ((flags & GTF_CONTAINED) != 0);
}
void SetContained()
{
assert(IsValue());
gtFlags |= GTF_CONTAINED;
assert(isContained());
}
void ClearContained()
{
assert(IsValue());
gtFlags &= ~GTF_CONTAINED;
ClearRegOptional();
}
bool CanCSE() const
{
return ((gtFlags & GTF_DONT_CSE) == 0);
}
void SetDoNotCSE()
{
gtFlags |= GTF_DONT_CSE;
}
void ClearDoNotCSE()
{
gtFlags &= ~GTF_DONT_CSE;
}
bool IsReverseOp() const
{
return (gtFlags & GTF_REVERSE_OPS) ? true : false;
}
void SetReverseOp()
{
gtFlags |= GTF_REVERSE_OPS;
}
void ClearReverseOp()
{
gtFlags &= ~GTF_REVERSE_OPS;
}
bool IsUnsigned() const
{
return ((gtFlags & GTF_UNSIGNED) != 0);
}
void SetUnsigned()
{
assert(OperIs(GT_ADD, GT_SUB, GT_CAST, GT_LE, GT_LT, GT_GT, GT_GE) || OperIsMul());
gtFlags |= GTF_UNSIGNED;
}
void ClearUnsigned()
{
assert(OperIs(GT_ADD, GT_SUB, GT_CAST) || OperIsMul());
gtFlags &= ~GTF_UNSIGNED;
}
void SetOverflow()
{
assert(OperMayOverflow());
gtFlags |= GTF_OVERFLOW;
}
void ClearOverflow()
{
assert(OperMayOverflow());
gtFlags &= ~GTF_OVERFLOW;
}
bool Is64RsltMul() const
{
return (gtFlags & GTF_MUL_64RSLT) != 0;
}
void Set64RsltMul()
{
gtFlags |= GTF_MUL_64RSLT;
}
void Clear64RsltMul()
{
gtFlags &= ~GTF_MUL_64RSLT;
}
void SetAllEffectsFlags(GenTree* source)
{
SetAllEffectsFlags(source->gtFlags & GTF_ALL_EFFECT);
}
void SetAllEffectsFlags(GenTree* firstSource, GenTree* secondSource)
{
SetAllEffectsFlags((firstSource->gtFlags | secondSource->gtFlags) & GTF_ALL_EFFECT);
}
void SetAllEffectsFlags(GenTree* firstSource, GenTree* secondSource, GenTree* thirdSource)
{
SetAllEffectsFlags((firstSource->gtFlags | secondSource->gtFlags | thirdSource->gtFlags) & GTF_ALL_EFFECT);
}
void SetAllEffectsFlags(GenTreeFlags sourceFlags)
{
assert((sourceFlags & ~GTF_ALL_EFFECT) == 0);
gtFlags &= ~GTF_ALL_EFFECT;
gtFlags |= sourceFlags;
}
inline bool IsCnsIntOrI() const;
inline bool IsIntegralConst() const;
inline bool IsIntCnsFitsInI32(); // Constant fits in INT32
inline bool IsCnsFltOrDbl() const;
inline bool IsCnsNonZeroFltOrDbl() const;
bool IsIconHandle() const
{
assert(gtOper == GT_CNS_INT);
return (gtFlags & GTF_ICON_HDL_MASK) ? true : false;
}
bool IsIconHandle(GenTreeFlags handleType) const
{
assert(gtOper == GT_CNS_INT);
assert((handleType & GTF_ICON_HDL_MASK) != 0); // check that handleType is one of the valid GTF_ICON_* values
assert((handleType & ~GTF_ICON_HDL_MASK) == 0);
return (gtFlags & GTF_ICON_HDL_MASK) == handleType;
}
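// For example, tree->IsIconHandle(GTF_ICON_CLASS_HDL) asks whether this constant's handle kind is a class
// handle (the node must be a GT_CNS_INT).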
// Return just the part of the flags corresponding to the GTF_ICON_*_HDL flag. For example,
// GTF_ICON_SCOPE_HDL. The tree node must be a const int, but it might not be a handle, in which
// case we'll return zero.
GenTreeFlags GetIconHandleFlag() const
{
assert(gtOper == GT_CNS_INT);
return (gtFlags & GTF_ICON_HDL_MASK);
}
// Mark this node as no longer being a handle; clear its GTF_ICON_*_HDL bits.
void ClearIconHandleMask()
{
assert(gtOper == GT_CNS_INT);
gtFlags &= ~GTF_ICON_HDL_MASK;
}
// Return true if the two GT_CNS_INT trees have the same handle flag (GTF_ICON_*_HDL).
static bool SameIconHandleFlag(GenTree* t1, GenTree* t2)
{
return t1->GetIconHandleFlag() == t2->GetIconHandleFlag();
}
bool IsArgPlaceHolderNode() const
{
return OperGet() == GT_ARGPLACE;
}
bool IsCall() const
{
return OperGet() == GT_CALL;
}
inline bool IsHelperCall();
bool gtOverflow() const;
bool gtOverflowEx() const;
bool gtSetFlags() const;
bool gtRequestSetFlags();
#ifdef DEBUG
static int gtDispFlags(GenTreeFlags flags, GenTreeDebugFlags debugFlags);
#endif
// cast operations
inline var_types CastFromType();
inline var_types& CastToType();
// Returns "true" iff "this" is a phi-related node (i.e. a GT_PHI_ARG, GT_PHI, or a PhiDefn).
bool IsPhiNode();
// Returns "true" iff "*this" is an assignment (GT_ASG) tree that defines an SSA name (lcl = phi(...));
bool IsPhiDefn();
// Returns "true" iff "*this" is a statement containing an assignment that defines an SSA name (lcl = phi(...));
// Because we hid the assignment operator of "BitSet" (in DEBUG),
// we can't synthesize an assignment operator.
// TODO-Cleanup: Could change this w/o liveset on tree nodes
// (This is also necessary for the VTable trick.)
GenTree()
{
}
// Returns an iterator that will produce the use edge to each operand of this node. Differs
// from the sequence of nodes produced by a loop over `GetChild` in its handling of call, phi,
// and block op nodes.
GenTreeUseEdgeIterator UseEdgesBegin();
GenTreeUseEdgeIterator UseEdgesEnd();
IteratorPair<GenTreeUseEdgeIterator> UseEdges();
// Returns an iterator that will produce each operand of this node, in execution order.
GenTreeOperandIterator OperandsBegin();
GenTreeOperandIterator OperandsEnd();
// Returns a range that will produce the operands of this node in execution order.
IteratorPair<GenTreeOperandIterator> Operands();
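// For example (an illustrative sketch):
//
//     for (GenTree* operand : node->Operands())
//     {
//         // ... visit 'operand' in execution order ...
//     }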
enum class VisitResult
{
Abort = false,
Continue = true
};
// Visits each operand of this node. The visitor must be either a lambda, function, or functor with the signature
// `GenTree::VisitResult VisitorFunction(GenTree* operand)`. Here is a simple example:
//
// unsigned operandCount = 0;
// node->VisitOperands([&](GenTree* operand) -> GenTree::VisitResult
// {
// operandCount++;
// return GenTree::VisitResult::Continue;
// });
//
// This function is generally more efficient than the operand iterator and should be preferred over that API for
// hot code, as it affords better opportunities for inlining and achieves shorter dynamic path lengths when
// deciding how operands need to be accessed.
//
// Note that this function does not respect `GTF_REVERSE_OPS`. This is always safe in LIR, but may be dangerous
// in HIR if for some reason you need to visit operands in the order in which they will execute.
template <typename TVisitor>
void VisitOperands(TVisitor visitor);
private:
template <typename TVisitor>
void VisitBinOpOperands(TVisitor visitor);
public:
bool Precedes(GenTree* other);
bool IsInvariant() const;
bool IsNeverNegative(Compiler* comp) const;
bool IsReuseRegVal() const
{
// This can be extended to non-constant nodes, but not to local or indir nodes.
if (IsInvariant() && ((gtFlags & GTF_REUSE_REG_VAL) != 0))
{
return true;
}
return false;
}
void SetReuseRegVal()
{
assert(IsInvariant());
gtFlags |= GTF_REUSE_REG_VAL;
}
void ResetReuseRegVal()
{
assert(IsInvariant());
gtFlags &= ~GTF_REUSE_REG_VAL;
}
void SetIndirExceptionFlags(Compiler* comp);
#if MEASURE_NODE_SIZE
static void DumpNodeSizes(FILE* fp);
#endif
#ifdef DEBUG
private:
GenTree& operator=(const GenTree& gt)
{
assert(!"Don't copy");
return *this;
}
#endif // DEBUG
#if DEBUGGABLE_GENTREE
// In DEBUG builds, add a dummy virtual method, to give the debugger run-time type information.
virtual void DummyVirt()
{
}
typedef void* VtablePtr;
VtablePtr GetVtableForOper(genTreeOps oper);
void SetVtableForOper(genTreeOps oper);
static VtablePtr s_vtablesForOpers[GT_COUNT];
static VtablePtr s_vtableForOp;
#endif // DEBUGGABLE_GENTREE
public:
inline void* operator new(size_t sz, class Compiler*, genTreeOps oper);
inline GenTree(genTreeOps oper, var_types type DEBUGARG(bool largeNode = false));
};
// Represents a GT_PHI node - a variable sized list of GT_PHI_ARG nodes.
// All PHI_ARG nodes must represent uses of the same local variable and
// the PHI node's type must be the same as the local variable's type.
//
// The PHI node does not represent a definition by itself, it is always
// the RHS of a GT_ASG node. The LHS of the ASG node is always a GT_LCL_VAR
// node, that is a definition for the same local variable referenced by
// all the used PHI_ARG nodes:
//
// ASG(LCL_VAR(lcl7), PHI(PHI_ARG(lcl7), PHI_ARG(lcl7), PHI_ARG(lcl7)))
//
// PHI nodes are also present in LIR, where GT_STORE_LCL_VAR replaces the
// ASG node.
//
// The order of the PHI_ARG uses is not currently relevant and it may or may not
// match the order of the predecessor blocks.
//
struct GenTreePhi final : public GenTree
{
class Use
{
GenTree* m_node;
Use* m_next;
public:
Use(GenTree* node, Use* next = nullptr) : m_node(node), m_next(next)
{
assert(node->OperIs(GT_PHI_ARG));
}
GenTree*& NodeRef()
{
return m_node;
}
GenTree* GetNode() const
{
assert(m_node->OperIs(GT_PHI_ARG));
return m_node;
}
void SetNode(GenTree* node)
{
assert(node->OperIs(GT_PHI_ARG));
m_node = node;
}
Use*& NextRef()
{
return m_next;
}
Use* GetNext() const
{
return m_next;
}
};
class UseIterator
{
Use* m_use;
public:
UseIterator(Use* use) : m_use(use)
{
}
Use& operator*() const
{
return *m_use;
}
Use* operator->() const
{
return m_use;
}
UseIterator& operator++()
{
m_use = m_use->GetNext();
return *this;
}
bool operator==(const UseIterator& i) const
{
return m_use == i.m_use;
}
bool operator!=(const UseIterator& i) const
{
return m_use != i.m_use;
}
};
class UseList
{
Use* m_uses;
public:
UseList(Use* uses) : m_uses(uses)
{
}
UseIterator begin() const
{
return UseIterator(m_uses);
}
UseIterator end() const
{
return UseIterator(nullptr);
}
};
Use* gtUses;
GenTreePhi(var_types type) : GenTree(GT_PHI, type), gtUses(nullptr)
{
}
UseList Uses()
{
return UseList(gtUses);
}
//--------------------------------------------------------------------------
// Equals: Checks if 2 PHI nodes are equal.
//
// Arguments:
// phi1 - The first PHI node
// phi2 - The second PHI node
//
// Return Value:
// true if the 2 PHI nodes have the same type, number of uses, and the
// uses are equal.
//
// Notes:
// The order of uses must be the same for equality, even if the
// order is not usually relevant and is not guaranteed to reflect
// a particular order of the predecessor blocks.
//
static bool Equals(GenTreePhi* phi1, GenTreePhi* phi2)
{
if (phi1->TypeGet() != phi2->TypeGet())
{
return false;
}
GenTreePhi::UseIterator i1 = phi1->Uses().begin();
GenTreePhi::UseIterator end1 = phi1->Uses().end();
GenTreePhi::UseIterator i2 = phi2->Uses().begin();
GenTreePhi::UseIterator end2 = phi2->Uses().end();
for (; (i1 != end1) && (i2 != end2); ++i1, ++i2)
{
if (!Compare(i1->GetNode(), i2->GetNode()))
{
return false;
}
}
return (i1 == end1) && (i2 == end2);
}
#if DEBUGGABLE_GENTREE
GenTreePhi() : GenTree()
{
}
#endif
};
// Represents a list of fields constituting a struct, when it is passed as an argument.
//
struct GenTreeFieldList : public GenTree
{
class Use
{
GenTree* m_node;
Use* m_next;
uint16_t m_offset;
var_types m_type;
public:
Use(GenTree* node, unsigned offset, var_types type)
: m_node(node), m_next(nullptr), m_offset(static_cast<uint16_t>(offset)), m_type(type)
{
// We can save space on 32 bit hosts by storing the offset as uint16_t. Struct promotion
// only accepts structs which are much smaller than that - 128 bytes = max 4 fields * max
// SIMD vector size (32 bytes).
assert(offset <= UINT16_MAX);
}
GenTree*& NodeRef()
{
return m_node;
}
GenTree* GetNode() const
{
return m_node;
}
void SetNode(GenTree* node)
{
assert(node != nullptr);
m_node = node;
}
Use*& NextRef()
{
return m_next;
}
Use* GetNext() const
{
return m_next;
}
void SetNext(Use* next)
{
m_next = next;
}
unsigned GetOffset() const
{
return m_offset;
}
var_types GetType() const
{
return m_type;
}
void SetType(var_types type)
{
m_type = type;
}
};
class UseIterator
{
Use* use;
public:
UseIterator(Use* use) : use(use)
{
}
Use& operator*()
{
return *use;
}
Use* operator->()
{
return use;
}
void operator++()
{
use = use->GetNext();
}
bool operator==(const UseIterator& other)
{
return use == other.use;
}
bool operator!=(const UseIterator& other)
{
return use != other.use;
}
};
class UseList
{
Use* m_head;
Use* m_tail;
public:
UseList() : m_head(nullptr), m_tail(nullptr)
{
}
Use* GetHead() const
{
return m_head;
}
UseIterator begin() const
{
return m_head;
}
UseIterator end() const
{
return nullptr;
}
void AddUse(Use* newUse)
{
assert(newUse->GetNext() == nullptr);
if (m_head == nullptr)
{
m_head = newUse;
}
else
{
m_tail->SetNext(newUse);
}
m_tail = newUse;
}
void InsertUse(Use* insertAfter, Use* newUse)
{
assert(newUse->GetNext() == nullptr);
newUse->SetNext(insertAfter->GetNext());
insertAfter->SetNext(newUse);
if (m_tail == insertAfter)
{
m_tail = newUse;
}
}
void Reverse()
{
m_tail = m_head;
m_head = nullptr;
for (Use *next, *use = m_tail; use != nullptr; use = next)
{
next = use->GetNext();
use->SetNext(m_head);
m_head = use;
}
}
bool IsSorted() const
{
unsigned offset = 0;
for (GenTreeFieldList::Use& use : *this)
{
if (use.GetOffset() < offset)
{
return false;
}
offset = use.GetOffset();
}
return true;
}
};
private:
UseList m_uses;
public:
GenTreeFieldList() : GenTree(GT_FIELD_LIST, TYP_STRUCT)
{
SetContained();
}
UseList& Uses()
{
return m_uses;
}
// Add a new field use to the end of the use list and update side effect flags.
void AddField(Compiler* compiler, GenTree* node, unsigned offset, var_types type);
// Add a new field use to the end of the use list without updating side effect flags.
void AddFieldLIR(Compiler* compiler, GenTree* node, unsigned offset, var_types type);
// Insert a new field use after the specified use and update side effect flags.
void InsertField(Compiler* compiler, Use* insertAfter, GenTree* node, unsigned offset, var_types type);
// Insert a new field use after the specified use without updating side effect flags.
void InsertFieldLIR(Compiler* compiler, Use* insertAfter, GenTree* node, unsigned offset, var_types type);
//--------------------------------------------------------------------------
// Equals: Check if 2 FIELD_LIST nodes are equal.
//
// Arguments:
// list1 - The first FIELD_LIST node
// list2 - The second FIELD_LIST node
//
// Return Value:
// true if the 2 FIELD_LIST nodes have the same type, number of uses, and the
// uses are equal.
//
static bool Equals(GenTreeFieldList* list1, GenTreeFieldList* list2)
{
assert(list1->TypeGet() == TYP_STRUCT);
assert(list2->TypeGet() == TYP_STRUCT);
UseIterator i1 = list1->Uses().begin();
UseIterator end1 = list1->Uses().end();
UseIterator i2 = list2->Uses().begin();
UseIterator end2 = list2->Uses().end();
for (; (i1 != end1) && (i2 != end2); ++i1, ++i2)
{
if (!Compare(i1->GetNode(), i2->GetNode()) || (i1->GetOffset() != i2->GetOffset()) ||
(i1->GetType() != i2->GetType()))
{
return false;
}
}
return (i1 == end1) && (i2 == end2);
}
};
//------------------------------------------------------------------------
// GenTreeUseEdgeIterator: an iterator that will produce each use edge of a GenTree node in the order in which
// they are used.
//
// Operand iteration is common enough in the back end of the compiler that the implementation of this type has
// traded some simplicity for speed:
// - As much work as is reasonable is done in the constructor rather than during operand iteration
// - Node-specific functionality is handled by a small class of "advance" functions called by operator++
// rather than making operator++ itself handle all nodes
// - Some specialization has been performed for specific node types/shapes (e.g. the advance function for
// binary nodes is specialized based on whether or not the node has the GTF_REVERSE_OPS flag set)
//
// Valid values of this type may be obtained by calling `GenTree::UseEdgesBegin` and `GenTree::UseEdgesEnd`.
//
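// Illustrative sketch (assuming "tree" is some GenTree*): iterating use edges allows the
// edge itself to be rewritten in place, e.g.
//
//     for (GenTreeUseEdgeIterator it = tree->UseEdgesBegin(); it != tree->UseEdgesEnd(); ++it)
//     {
//         GenTree** useEdge = *it; // address of the operand slot; *useEdge may be replaced
//     }
//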
class GenTreeUseEdgeIterator final
{
friend class GenTreeOperandIterator;
friend GenTreeUseEdgeIterator GenTree::UseEdgesBegin();
friend GenTreeUseEdgeIterator GenTree::UseEdgesEnd();
enum
{
CALL_INSTANCE = 0,
CALL_ARGS = 1,
CALL_LATE_ARGS = 2,
CALL_CONTROL_EXPR = 3,
CALL_COOKIE = 4,
CALL_ADDRESS = 5,
CALL_TERMINAL = 6,
};
typedef void (GenTreeUseEdgeIterator::*AdvanceFn)();
AdvanceFn m_advance;
GenTree* m_node;
GenTree** m_edge;
// Pointer sized state storage, GenTreePhi::Use* or GenTreeCall::Use*
// or the exclusive end/beginning of GenTreeMultiOp's operand array.
void* m_statePtr;
// Integer sized state storage, usually the operand index for non-list based nodes.
int m_state;
GenTreeUseEdgeIterator(GenTree* node);
// Advance functions for special nodes
void AdvanceCmpXchg();
void AdvanceArrElem();
void AdvanceArrOffset();
void AdvanceStoreDynBlk();
void AdvanceFieldList();
void AdvancePhi();
template <bool ReverseOperands>
void AdvanceBinOp();
void SetEntryStateForBinOp();
// The advance function for call nodes
template <int state>
void AdvanceCall();
#if defined(FEATURE_SIMD) || defined(FEATURE_HW_INTRINSICS)
void AdvanceMultiOp();
void AdvanceReversedMultiOp();
void SetEntryStateForMultiOp();
#endif
void Terminate();
public:
GenTreeUseEdgeIterator();
inline GenTree** operator*()
{
assert(m_state != -1);
return m_edge;
}
inline GenTree** operator->()
{
assert(m_state != -1);
return m_edge;
}
inline bool operator==(const GenTreeUseEdgeIterator& other) const
{
if (m_state == -1 || other.m_state == -1)
{
return m_state == other.m_state;
}
return (m_node == other.m_node) && (m_edge == other.m_edge) && (m_statePtr == other.m_statePtr) &&
(m_state == other.m_state);
}
inline bool operator!=(const GenTreeUseEdgeIterator& other) const
{
return !(operator==(other));
}
GenTreeUseEdgeIterator& operator++();
};
//------------------------------------------------------------------------
// GenTreeOperandIterator: an iterator that will produce each operand of a
// GenTree node in the order in which they are
// used. This uses `GenTreeUseEdgeIterator` under
// the covers.
//
// Note: valid values of this type may be obtained by calling
// `GenTree::OperandsBegin` and `GenTree::OperandsEnd`.
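// Illustrative sketch (assuming "tree" is some GenTree*): the operand iterator yields the
// operand nodes themselves rather than the use edges, e.g.
//
//     for (GenTreeOperandIterator it = tree->OperandsBegin(); it != tree->OperandsEnd(); ++it)
//     {
//         GenTree* operand = *it;
//     }
//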
class GenTreeOperandIterator final
{
friend GenTreeOperandIterator GenTree::OperandsBegin();
friend GenTreeOperandIterator GenTree::OperandsEnd();
GenTreeUseEdgeIterator m_useEdges;
GenTreeOperandIterator(GenTree* node) : m_useEdges(node)
{
}
public:
GenTreeOperandIterator() : m_useEdges()
{
}
inline GenTree* operator*()
{
return *(*m_useEdges);
}
inline GenTree* operator->()
{
return *(*m_useEdges);
}
inline bool operator==(const GenTreeOperandIterator& other) const
{
return m_useEdges == other.m_useEdges;
}
inline bool operator!=(const GenTreeOperandIterator& other) const
{
return !(operator==(other));
}
inline GenTreeOperandIterator& operator++()
{
++m_useEdges;
return *this;
}
};
/*****************************************************************************/
// In the current design, we never instantiate GenTreeUnOp: it exists only to be
// used as a base class. For unary operators, we instantiate GenTreeOp, with a NULL second
// argument. We check that this is true dynamically. We could tighten this and get static
// checking, but that would entail accessing the first child of a unary operator via something
// like gtUnOp.gtOp1 instead of AsOp()->gtOp1.
struct GenTreeUnOp : public GenTree
{
GenTree* gtOp1;
protected:
GenTreeUnOp(genTreeOps oper, var_types type DEBUGARG(bool largeNode = false))
: GenTree(oper, type DEBUGARG(largeNode)), gtOp1(nullptr)
{
}
GenTreeUnOp(genTreeOps oper, var_types type, GenTree* op1 DEBUGARG(bool largeNode = false))
: GenTree(oper, type DEBUGARG(largeNode)), gtOp1(op1)
{
assert(op1 != nullptr || NullOp1Legal());
if (op1 != nullptr)
{ // Propagate effects flags from child.
gtFlags |= op1->gtFlags & GTF_ALL_EFFECT;
}
}
#if DEBUGGABLE_GENTREE
GenTreeUnOp() : GenTree(), gtOp1(nullptr)
{
}
#endif
};
struct GenTreeOp : public GenTreeUnOp
{
GenTree* gtOp2;
GenTreeOp(genTreeOps oper, var_types type, GenTree* op1, GenTree* op2 DEBUGARG(bool largeNode = false))
: GenTreeUnOp(oper, type, op1 DEBUGARG(largeNode)), gtOp2(op2)
{
// comparisons are always integral types
assert(!GenTree::OperIsCompare(oper) || varTypeIsIntegral(type));
// Binary operators, with a few exceptions, require a non-nullptr
// second argument.
assert(op2 != nullptr || NullOp2Legal());
// Unary operators, on the other hand, require a null second argument.
assert(!OperIsUnary(oper) || op2 == nullptr);
// Propagate effects flags from child. (UnOp handled this for first child.)
if (op2 != nullptr)
{
gtFlags |= op2->gtFlags & GTF_ALL_EFFECT;
}
}
// A small set of operators are unary with an optional operand. We use
// this constructor to build those.
GenTreeOp(genTreeOps oper, var_types type DEBUGARG(bool largeNode = false))
: GenTreeUnOp(oper, type DEBUGARG(largeNode)), gtOp2(nullptr)
{
// Unary operators with optional arguments:
assert(oper == GT_NOP || oper == GT_RETURN || oper == GT_RETFILT || OperIsBlk(oper));
}
// returns true if we will use the division by constant optimization for this node.
bool UsesDivideByConstOptimized(Compiler* comp);
// checks if we will use the division by constant optimization for this node,
// and if so sets the flags GTF_DIV_BY_CNS_OPT and GTF_DONT_CSE on the constant
void CheckDivideByConstOptimized(Compiler* comp);
// True if this node is marked as using the division by constant optimization
bool MarkedDivideByConstOptimized() const
{
return (gtFlags & GTF_DIV_BY_CNS_OPT) != 0;
}
#if !defined(TARGET_64BIT) || defined(TARGET_ARM64)
bool IsValidLongMul();
#endif
#if !defined(TARGET_64BIT) && defined(DEBUG)
void DebugCheckLongMul();
#endif
#if DEBUGGABLE_GENTREE
GenTreeOp() : GenTreeUnOp(), gtOp2(nullptr)
{
}
#endif
// True if this relop is marked for a transform during the emitter
// phase, e.g., jge => jns
bool MarkedForSignJumpOpt() const
{
return (gtFlags & GTF_RELOP_SJUMP_OPT) != 0;
}
};
struct GenTreeVal : public GenTree
{
size_t gtVal1;
GenTreeVal(genTreeOps oper, var_types type, ssize_t val) : GenTree(oper, type), gtVal1(val)
{
}
#if DEBUGGABLE_GENTREE
GenTreeVal() : GenTree()
{
}
#endif
};
struct GenTreeIntConCommon : public GenTree
{
inline INT64 LngValue() const;
inline void SetLngValue(INT64 val);
inline ssize_t IconValue() const;
inline void SetIconValue(ssize_t val);
inline INT64 IntegralValue() const;
inline void SetIntegralValue(int64_t value);
template <typename T>
inline void SetValueTruncating(T value);
GenTreeIntConCommon(genTreeOps oper, var_types type DEBUGARG(bool largeNode = false))
: GenTree(oper, type DEBUGARG(largeNode))
{
}
bool FitsInI8() // IconValue() fits into 8-bit signed storage
{
return FitsInI8(IconValue());
}
static bool FitsInI8(ssize_t val) // Constant fits into 8-bit signed storage
{
return (int8_t)val == val;
}
bool FitsInI32() // IconValue() fits into 32-bit signed storage
{
return FitsInI32(IconValue());
}
static bool FitsInI32(ssize_t val) // Constant fits into 32-bit signed storage
{
#ifdef TARGET_64BIT
return (int32_t)val == val;
#else
return true;
#endif
}
bool ImmedValNeedsReloc(Compiler* comp);
bool ImmedValCanBeFolded(Compiler* comp, genTreeOps op);
#ifdef TARGET_XARCH
bool FitsInAddrBase(Compiler* comp);
bool AddrNeedsReloc(Compiler* comp);
#endif
#if DEBUGGABLE_GENTREE
GenTreeIntConCommon() : GenTree()
{
}
#endif
};
// node representing a read from a physical register
struct GenTreePhysReg : public GenTree
{
// physregs need a field beyond GetRegNum() because
// GetRegNum() indicates the destination (and can be changed)
// whereas gtSrcReg indicates the source
regNumber gtSrcReg;
GenTreePhysReg(regNumber r, var_types type = TYP_I_IMPL) : GenTree(GT_PHYSREG, type), gtSrcReg(r)
{
}
#if DEBUGGABLE_GENTREE
GenTreePhysReg() : GenTree()
{
}
#endif
};
/* gtIntCon -- integer constant (GT_CNS_INT) */
struct GenTreeIntCon : public GenTreeIntConCommon
{
/*
* This is the GT_CNS_INT struct definition.
* It's used to hold both int constants and pointer handle constants.
* For the 64-bit targets we will only use GT_CNS_INT as it used to represent all the possible sizes
* For the 32-bit targets we use a GT_CNS_LNG to hold a 64-bit integer constant and GT_CNS_INT for all others.
* In the future when we retarget the JIT for x86 we should consider eliminating GT_CNS_LNG
*/
ssize_t gtIconVal; // Must overlap and have the same offset with the gtIconVal field in GenTreeLngCon below.
/* The InitializeArray intrinsic needs to go back to the newarray statement
to find the class handle of the array so that we can get its size. However,
in ngen mode, the handle in that statement does not correspond to the compile
time handle (rather it lets you get a handle at run-time). In that case, we also
need to store a compile time handle, which goes in this gtCompileTimeHandle field.
*/
ssize_t gtCompileTimeHandle;
// TODO-Cleanup: It's not clear what characterizes the cases where the field
// above is used. It may be that its uses and those of the "gtFieldSeq" field below
// are mutually exclusive, and they could be put in a union. Or else we should separate
// this type into three subtypes.
// If this constant represents the offset of one or more fields, "gtFieldSeq" represents that
// sequence of fields.
FieldSeqNode* gtFieldSeq;
#ifdef DEBUG
// If the value represents a target address, this holds the method handle to that target, which is used
// to fetch the target method name and display it in the disassembled code.
size_t gtTargetHandle = 0;
#endif
GenTreeIntCon(var_types type, ssize_t value DEBUGARG(bool largeNode = false))
: GenTreeIntConCommon(GT_CNS_INT, type DEBUGARG(largeNode))
, gtIconVal(value)
, gtCompileTimeHandle(0)
, gtFieldSeq(FieldSeqStore::NotAField())
{
}
GenTreeIntCon(var_types type, ssize_t value, FieldSeqNode* fields DEBUGARG(bool largeNode = false))
: GenTreeIntConCommon(GT_CNS_INT, type DEBUGARG(largeNode))
, gtIconVal(value)
, gtCompileTimeHandle(0)
, gtFieldSeq(fields)
{
assert(fields != nullptr);
}
void FixupInitBlkValue(var_types asgType);
#if DEBUGGABLE_GENTREE
GenTreeIntCon() : GenTreeIntConCommon()
{
}
#endif
};
/* gtLngCon -- long constant (GT_CNS_LNG) */
struct GenTreeLngCon : public GenTreeIntConCommon
{
INT64 gtLconVal; // Must overlap and have the same offset with the gtIconVal field in GenTreeIntCon above.
INT32 LoVal()
{
return (INT32)(gtLconVal & 0xffffffff);
}
INT32 HiVal()
{
return (INT32)(gtLconVal >> 32);
}
GenTreeLngCon(INT64 val) : GenTreeIntConCommon(GT_CNS_NATIVELONG, TYP_LONG)
{
SetLngValue(val);
}
#if DEBUGGABLE_GENTREE
GenTreeLngCon() : GenTreeIntConCommon()
{
}
#endif
};
inline INT64 GenTreeIntConCommon::LngValue() const
{
#ifndef TARGET_64BIT
assert(gtOper == GT_CNS_LNG);
return AsLngCon()->gtLconVal;
#else
return IconValue();
#endif
}
inline void GenTreeIntConCommon::SetLngValue(INT64 val)
{
#ifndef TARGET_64BIT
assert(gtOper == GT_CNS_LNG);
AsLngCon()->gtLconVal = val;
#else
// Compile time asserts that these two fields overlap and have the same offsets: gtIconVal and gtLconVal
C_ASSERT(offsetof(GenTreeLngCon, gtLconVal) == offsetof(GenTreeIntCon, gtIconVal));
C_ASSERT(sizeof(AsLngCon()->gtLconVal) == sizeof(AsIntCon()->gtIconVal));
SetIconValue(ssize_t(val));
#endif
}
inline ssize_t GenTreeIntConCommon::IconValue() const
{
assert(gtOper == GT_CNS_INT); // We should never see a GT_CNS_LNG for a 64-bit target!
return AsIntCon()->gtIconVal;
}
inline void GenTreeIntConCommon::SetIconValue(ssize_t val)
{
assert(gtOper == GT_CNS_INT); // We should never see a GT_CNS_LNG for a 64-bit target!
AsIntCon()->gtIconVal = val;
}
inline INT64 GenTreeIntConCommon::IntegralValue() const
{
#ifdef TARGET_64BIT
return LngValue();
#else
return gtOper == GT_CNS_LNG ? LngValue() : (INT64)IconValue();
#endif // TARGET_64BIT
}
inline void GenTreeIntConCommon::SetIntegralValue(int64_t value)
{
#ifdef TARGET_64BIT
SetIconValue(value);
#else
if (OperIs(GT_CNS_LNG))
{
SetLngValue(value);
}
else
{
assert(FitsIn<int32_t>(value));
SetIconValue(static_cast<int32_t>(value));
}
#endif // TARGET_64BIT
}
//------------------------------------------------------------------------
// SetValueTruncating: Set the value, truncating to TYP_INT if necessary.
//
// The function will truncate the supplied value to a 32 bit signed
// integer if the node's type is not TYP_LONG, otherwise setting it
// as-is. Note that this function intentionally does not check for
// small types (such nodes are created in lowering) for TP reasons.
//
// This function is intended to be used where its truncating behavior is
// desirable. One example is folding of ADD(CNS_INT, CNS_INT) performed in
// wider integers, which is typical when compiling on 64 bit hosts, as
// most arithmetic is done in ssize_t's aka int64_t's in that case, while
// the node itself can be of a narrower type.
//
// Arguments:
// value - Value to set, truncating to TYP_INT if the node is not of TYP_LONG
//
// Notes:
// This function is templated so that it works well with compiler warnings of
// the form "Operation may overflow before being assigned to a wider type", in
// case "value" is of type ssize_t, which is common.
//
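// Illustrative sketch: constant folding performed in ssize_t on a 64-bit host might do
//
//     ssize_t folded = op1->IconValue() + op2->IconValue();
//     op1->SetValueTruncating(folded); // stores as-is for TYP_LONG, else truncates to int32
//
// where "op1" and "op2" are assumed to be GenTreeIntConCommon nodes of the same type.
//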
template <typename T>
inline void GenTreeIntConCommon::SetValueTruncating(T value)
{
static_assert_no_msg((std::is_same<T, int32_t>::value || std::is_same<T, int64_t>::value));
if (TypeIs(TYP_LONG))
{
SetLngValue(value);
}
else
{
SetIconValue(static_cast<int32_t>(value));
}
}
/* gtDblCon -- double constant (GT_CNS_DBL) */
struct GenTreeDblCon : public GenTree
{
double gtDconVal;
bool isBitwiseEqual(GenTreeDblCon* other)
{
unsigned __int64 bits = *(unsigned __int64*)(&gtDconVal);
unsigned __int64 otherBits = *(unsigned __int64*)(&(other->gtDconVal));
return (bits == otherBits);
}
GenTreeDblCon(double val, var_types type = TYP_DOUBLE) : GenTree(GT_CNS_DBL, type), gtDconVal(val)
{
assert(varTypeIsFloating(type));
}
#if DEBUGGABLE_GENTREE
GenTreeDblCon() : GenTree()
{
}
#endif
};
/* gtStrCon -- string constant (GT_CNS_STR) */
#define EMPTY_STRING_SCON (unsigned)-1
struct GenTreeStrCon : public GenTree
{
unsigned gtSconCPX;
CORINFO_MODULE_HANDLE gtScpHnd;
// Returns true if this GT_CNS_STR was imported for String.Empty field
bool IsStringEmptyField()
{
return gtSconCPX == EMPTY_STRING_SCON && gtScpHnd == nullptr;
}
// Because this node can come from an inlined method we need to
// have the scope handle, since it will become a helper call.
GenTreeStrCon(unsigned sconCPX, CORINFO_MODULE_HANDLE mod DEBUGARG(bool largeNode = false))
: GenTree(GT_CNS_STR, TYP_REF DEBUGARG(largeNode)), gtSconCPX(sconCPX), gtScpHnd(mod)
{
}
#if DEBUGGABLE_GENTREE
GenTreeStrCon() : GenTree()
{
}
#endif
};
// Common supertype of LCL_VAR, LCL_FLD, REG_VAR, PHI_ARG
// This inherits from UnOp because lclvar stores are Unops
struct GenTreeLclVarCommon : public GenTreeUnOp
{
private:
unsigned _gtLclNum; // The local number. An index into the Compiler::lvaTable array.
unsigned _gtSsaNum; // The SSA number.
public:
GenTreeLclVarCommon(genTreeOps oper, var_types type, unsigned lclNum DEBUGARG(bool largeNode = false))
: GenTreeUnOp(oper, type DEBUGARG(largeNode))
{
SetLclNum(lclNum);
}
unsigned GetLclNum() const
{
return _gtLclNum;
}
void SetLclNum(unsigned lclNum)
{
_gtLclNum = lclNum;
_gtSsaNum = SsaConfig::RESERVED_SSA_NUM;
}
uint16_t GetLclOffs() const;
unsigned GetSsaNum() const
{
return _gtSsaNum;
}
void SetSsaNum(unsigned ssaNum)
{
_gtSsaNum = ssaNum;
}
bool HasSsaName()
{
return (GetSsaNum() != SsaConfig::RESERVED_SSA_NUM);
}
#if DEBUGGABLE_GENTREE
GenTreeLclVarCommon() : GenTreeUnOp()
{
}
#endif
};
//------------------------------------------------------------------------
// MultiRegSpillFlags
//
// GTF_SPILL or GTF_SPILLED flag on a multi-reg node indicates that one or
// more of its result regs are in that state. The spill flags of each register
// are stored here. We only need 2 bits per returned register,
// so this is treated as a 2-bit array. No architecture needs more than 8 bits.
//
typedef unsigned char MultiRegSpillFlags;
static const unsigned PACKED_GTF_SPILL = 1;
static const unsigned PACKED_GTF_SPILLED = 2;
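// Illustrative layout: with 2 bits per register, a MultiRegSpillFlags value of 0x6 (0b0110)
// decodes as PACKED_GTF_SPILLED for register 0 (bits 0-1 == 0b10) and PACKED_GTF_SPILL for
// register 1 (bits 2-3 == 0b01), i.e. GetMultiRegSpillFlagsByIdx(0x6, 0) == GTF_SPILLED and
// GetMultiRegSpillFlagsByIdx(0x6, 1) == GTF_SPILL.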
//----------------------------------------------------------------------
// GetMultiRegSpillFlagsByIdx: get spill flag associated with the return register
// specified by its index.
//
// Arguments:
// idx - Position or index of the return register
//
// Return Value:
// Returns GTF_* flags associated with the register. Only GTF_SPILL and GTF_SPILLED are considered.
//
inline GenTreeFlags GetMultiRegSpillFlagsByIdx(MultiRegSpillFlags flags, unsigned idx)
{
static_assert_no_msg(MAX_RET_REG_COUNT * 2 <= sizeof(unsigned char) * BITS_PER_BYTE);
assert(idx < MAX_RET_REG_COUNT);
unsigned bits = flags >> (idx * 2); // It doesn't matter that we possibly leave other high bits here.
GenTreeFlags spillFlags = GTF_EMPTY;
if (bits & PACKED_GTF_SPILL)
{
spillFlags |= GTF_SPILL;
}
if (bits & PACKED_GTF_SPILLED)
{
spillFlags |= GTF_SPILLED;
}
return spillFlags;
}
//----------------------------------------------------------------------
// SetMultiRegSpillFlagsByIdx: set spill flags for the register specified by its index.
//
// Arguments:
// oldFlags - The current value of the MultiRegSpillFlags for a node.
// flagsToSet - GTF_* flags. Only GTF_SPILL and GTF_SPILLED are allowed.
// Note that these are the flags used on non-multireg nodes,
// and this method adds the appropriate flags to the
// incoming MultiRegSpillFlags and returns it.
// idx - Position or index of the register
//
// Return Value:
// The new value for the node's MultiRegSpillFlags.
//
inline MultiRegSpillFlags SetMultiRegSpillFlagsByIdx(MultiRegSpillFlags oldFlags, GenTreeFlags flagsToSet, unsigned idx)
{
static_assert_no_msg(MAX_RET_REG_COUNT * 2 <= sizeof(unsigned char) * BITS_PER_BYTE);
assert(idx < MAX_RET_REG_COUNT);
MultiRegSpillFlags newFlags = oldFlags;
unsigned bits = 0;
if (flagsToSet & GTF_SPILL)
{
bits |= PACKED_GTF_SPILL;
}
if (flagsToSet & GTF_SPILLED)
{
bits |= PACKED_GTF_SPILLED;
}
const unsigned char packedFlags = PACKED_GTF_SPILL | PACKED_GTF_SPILLED;
// Clear anything that was already there by masking out the bits before 'or'ing in what we want there.
newFlags = (unsigned char)((newFlags & ~(packedFlags << (idx * 2))) | (bits << (idx * 2)));
return newFlags;
}
// gtLclVar -- load/store/addr of local variable
struct GenTreeLclVar : public GenTreeLclVarCommon
{
private:
regNumberSmall gtOtherReg[MAX_MULTIREG_COUNT - 1];
MultiRegSpillFlags gtSpillFlags;
public:
INDEBUG(IL_OFFSET gtLclILoffs;) // instr offset of ref (only for JIT dumps)
// Multireg support
bool IsMultiReg() const
{
return ((gtFlags & GTF_VAR_MULTIREG) != 0);
}
void ClearMultiReg()
{
gtFlags &= ~GTF_VAR_MULTIREG;
}
void SetMultiReg()
{
gtFlags |= GTF_VAR_MULTIREG;
ClearOtherRegFlags();
}
regNumber GetRegNumByIdx(int regIndex) const
{
assert(regIndex < MAX_MULTIREG_COUNT);
return (regIndex == 0) ? GetRegNum() : (regNumber)gtOtherReg[regIndex - 1];
}
void SetRegNumByIdx(regNumber reg, int regIndex)
{
assert(regIndex < MAX_MULTIREG_COUNT);
if (regIndex == 0)
{
SetRegNum(reg);
}
else
{
gtOtherReg[regIndex - 1] = regNumberSmall(reg);
}
}
GenTreeFlags GetRegSpillFlagByIdx(unsigned idx) const
{
return GetMultiRegSpillFlagsByIdx(gtSpillFlags, idx);
}
void SetRegSpillFlagByIdx(GenTreeFlags flags, unsigned idx)
{
gtSpillFlags = SetMultiRegSpillFlagsByIdx(gtSpillFlags, flags, idx);
}
unsigned int GetFieldCount(Compiler* compiler) const;
var_types GetFieldTypeByIndex(Compiler* compiler, unsigned idx);
//-------------------------------------------------------------------
// ClearOtherRegFlags: clear GTF_* flags associated with gtOtherRegs
//
// Arguments:
// None
//
// Return Value:
// None
void ClearOtherRegFlags()
{
gtSpillFlags = 0;
}
//-------------------------------------------------------------------------
// CopyOtherRegFlags: copy GTF_* flags associated with gtOtherRegs from
// the given LclVar node.
//
// Arguments:
// from - GenTreeLclVar node from which to copy
//
// Return Value:
// None
//
void CopyOtherRegFlags(GenTreeLclVar* from)
{
this->gtSpillFlags = from->gtSpillFlags;
}
GenTreeLclVar(genTreeOps oper,
var_types type,
unsigned lclNum DEBUGARG(IL_OFFSET ilOffs = BAD_IL_OFFSET) DEBUGARG(bool largeNode = false))
: GenTreeLclVarCommon(oper, type, lclNum DEBUGARG(largeNode)) DEBUGARG(gtLclILoffs(ilOffs))
{
assert(OperIsLocal(oper) || OperIsLocalAddr(oper));
}
#if DEBUGGABLE_GENTREE
GenTreeLclVar() : GenTreeLclVarCommon()
{
}
#endif
};
// gtLclFld -- load/store/addr of local variable field
struct GenTreeLclFld : public GenTreeLclVarCommon
{
private:
uint16_t m_lclOffs; // offset into the variable to access
FieldSeqNode* m_fieldSeq; // This LclFld node represents some sequences of accesses.
public:
GenTreeLclFld(genTreeOps oper, var_types type, unsigned lclNum, unsigned lclOffs)
: GenTreeLclVarCommon(oper, type, lclNum), m_lclOffs(static_cast<uint16_t>(lclOffs)), m_fieldSeq(nullptr)
{
assert(lclOffs <= UINT16_MAX);
}
uint16_t GetLclOffs() const
{
return m_lclOffs;
}
void SetLclOffs(unsigned lclOffs)
{
assert(lclOffs <= UINT16_MAX);
m_lclOffs = static_cast<uint16_t>(lclOffs);
}
FieldSeqNode* GetFieldSeq() const
{
return m_fieldSeq;
}
void SetFieldSeq(FieldSeqNode* fieldSeq)
{
m_fieldSeq = fieldSeq;
}
#ifdef TARGET_ARM
bool IsOffsetMisaligned() const;
#endif // TARGET_ARM
#if DEBUGGABLE_GENTREE
GenTreeLclFld() : GenTreeLclVarCommon()
{
}
#endif
};
/* gtCast -- conversion to a different type (GT_CAST) */
struct GenTreeCast : public GenTreeOp
{
GenTree*& CastOp()
{
return gtOp1;
}
var_types gtCastType;
GenTreeCast(var_types type, GenTree* op, bool fromUnsigned, var_types castType DEBUGARG(bool largeNode = false))
: GenTreeOp(GT_CAST, type, op, nullptr DEBUGARG(largeNode)), gtCastType(castType)
{
// We do not allow casts from floating point types to be treated as from
// unsigned to avoid bugs related to wrong GTF_UNSIGNED in case the
// CastOp's type changes.
assert(!varTypeIsFloating(op) || !fromUnsigned);
gtFlags |= fromUnsigned ? GTF_UNSIGNED : GTF_EMPTY;
}
#if DEBUGGABLE_GENTREE
GenTreeCast() : GenTreeOp()
{
}
#endif
};
// GT_BOX nodes are place markers for boxed values. The "real" tree
// for most purposes is in gtBoxOp.
struct GenTreeBox : public GenTreeUnOp
{
// An expanded helper call to implement the "box" if we don't get
// rid of it any other way. Must be in same position as op1.
GenTree*& BoxOp()
{
return gtOp1;
}
// This is the statement that contains the assignment tree when the node is an inlined GT_BOX on a value
// type
Statement* gtAsgStmtWhenInlinedBoxValue;
// And this is the statement that copies from the value being boxed to the box payload
Statement* gtCopyStmtWhenInlinedBoxValue;
GenTreeBox(var_types type,
GenTree* boxOp,
Statement* asgStmtWhenInlinedBoxValue,
Statement* copyStmtWhenInlinedBoxValue)
: GenTreeUnOp(GT_BOX, type, boxOp)
, gtAsgStmtWhenInlinedBoxValue(asgStmtWhenInlinedBoxValue)
, gtCopyStmtWhenInlinedBoxValue(copyStmtWhenInlinedBoxValue)
{
}
#if DEBUGGABLE_GENTREE
GenTreeBox() : GenTreeUnOp()
{
}
#endif
};
// GenTreeField -- data member ref (GT_FIELD)
struct GenTreeField : public GenTreeUnOp
{
CORINFO_FIELD_HANDLE gtFldHnd;
DWORD gtFldOffset;
bool gtFldMayOverlap;
#ifdef FEATURE_READYTORUN
CORINFO_CONST_LOOKUP gtFieldLookup;
#endif
GenTreeField(var_types type, GenTree* obj, CORINFO_FIELD_HANDLE fldHnd, DWORD offs)
: GenTreeUnOp(GT_FIELD, type, obj), gtFldHnd(fldHnd), gtFldOffset(offs), gtFldMayOverlap(false)
{
#ifdef FEATURE_READYTORUN
gtFieldLookup.addr = nullptr;
#endif
}
#if DEBUGGABLE_GENTREE
GenTreeField() : GenTreeUnOp()
{
}
#endif
// The object this field belongs to. Will be "nullptr" for static fields.
// Note that this is an address, i. e. for struct fields it will be ADDR(STRUCT).
GenTree* GetFldObj() const
{
return gtOp1;
}
// True if this field is a volatile memory operation.
bool IsVolatile() const
{
return (gtFlags & GTF_FLD_VOLATILE) != 0;
}
};
// There was quite a bit of confusion in the code base about which of gtOp1 and gtOp2 was the
// 'then' and 'else' clause of a colon node. Adding these accessors, while not enforcing anything,
// at least *allows* the programmer to be obviously correct.
// However, these conventions seem backward.
// TODO-Cleanup: If we could get these accessors used everywhere, then we could switch them.
struct GenTreeColon : public GenTreeOp
{
GenTree*& ThenNode()
{
return gtOp2;
}
GenTree*& ElseNode()
{
return gtOp1;
}
#if DEBUGGABLE_GENTREE
GenTreeColon() : GenTreeOp()
{
}
#endif
GenTreeColon(var_types typ, GenTree* thenNode, GenTree* elseNode) : GenTreeOp(GT_COLON, typ, elseNode, thenNode)
{
}
};
// gtCall -- method call (GT_CALL)
enum class InlineObservation;
//------------------------------------------------------------------------
// GenTreeCallFlags: a bitmask of flags for GenTreeCall stored in gtCallMoreFlags.
//
// clang-format off
enum GenTreeCallFlags : unsigned int
{
GTF_CALL_M_EMPTY = 0,
GTF_CALL_M_EXPLICIT_TAILCALL = 0x00000001, // the call is "tail" prefixed and importer has performed tail call checks
GTF_CALL_M_TAILCALL = 0x00000002, // the call is a tailcall
GTF_CALL_M_VARARGS = 0x00000004, // the call uses varargs ABI
GTF_CALL_M_RETBUFFARG = 0x00000008, // call has a return buffer argument
GTF_CALL_M_DELEGATE_INV = 0x00000010, // call to Delegate.Invoke
GTF_CALL_M_NOGCCHECK = 0x00000020, // not a call for computing full interruptability and therefore no GC check is required.
GTF_CALL_M_SPECIAL_INTRINSIC = 0x00000040, // function that could be optimized as an intrinsic
// in special cases. Used to optimize fast way out in morphing
GTF_CALL_M_UNMGD_THISCALL = 0x00000080, // "this" pointer (first argument) should be enregistered (only for GTF_CALL_UNMANAGED)
GTF_CALL_M_VIRTSTUB_REL_INDIRECT = 0x00000080, // the virtstub is indirected through a relative address (only for GTF_CALL_VIRT_STUB)
GTF_CALL_M_NONVIRT_SAME_THIS = 0x00000080, // callee "this" pointer is equal to caller this pointer (only for GTF_CALL_NONVIRT)
GTF_CALL_M_FRAME_VAR_DEATH = 0x00000100, // the compLvFrameListRoot variable dies here (last use)
GTF_CALL_M_TAILCALL_VIA_JIT_HELPER = 0x00000200, // call is a tail call dispatched via tail call JIT helper.
#if FEATURE_TAILCALL_OPT
GTF_CALL_M_IMPLICIT_TAILCALL = 0x00000400, // call is an opportunistic tail call and importer has performed tail call checks
GTF_CALL_M_TAILCALL_TO_LOOP = 0x00000800, // call is a fast recursive tail call that can be converted into a loop
#endif
GTF_CALL_M_PINVOKE = 0x00001000, // call is a pinvoke. This mirrors VM flag CORINFO_FLG_PINVOKE.
// A call marked as Pinvoke is not necessarily a GT_CALL_UNMANAGED. For example,
// an IL Stub dynamically generated for a PInvoke declaration is flagged as
// a Pinvoke but not as an unmanaged call. See impCheckForPInvokeCall() to
// know when these flags are set.
GTF_CALL_M_R2R_REL_INDIRECT = 0x00002000, // ready to run call is indirected through a relative address
GTF_CALL_M_DOES_NOT_RETURN = 0x00004000, // call does not return
GTF_CALL_M_WRAPPER_DELEGATE_INV = 0x00008000, // call is in wrapper delegate
GTF_CALL_M_FAT_POINTER_CHECK = 0x00010000, // CoreRT managed calli needs transformation, that checks
// special bit in calli address. If it is set, then it is necessary
// to restore real function address and load hidden argument
// as the first argument for calli. It is CoreRT replacement for instantiating
// stubs, because executable code cannot be generated at runtime.
GTF_CALL_M_HELPER_SPECIAL_DCE = 0x00020000, // this helper call can be removed if it is part of a comma and
// the comma result is unused.
GTF_CALL_M_DEVIRTUALIZED = 0x00040000, // this call was devirtualized
GTF_CALL_M_UNBOXED = 0x00080000, // this call was optimized to use the unboxed entry point
GTF_CALL_M_GUARDED_DEVIRT = 0x00100000, // this call is a candidate for guarded devirtualization
GTF_CALL_M_GUARDED_DEVIRT_CHAIN = 0x00200000, // this call is a candidate for chained guarded devirtualization
GTF_CALL_M_GUARDED = 0x00400000, // this call was transformed by guarded devirtualization
GTF_CALL_M_ALLOC_SIDE_EFFECTS = 0x00800000, // this is a call to an allocator with side effects
GTF_CALL_M_SUPPRESS_GC_TRANSITION = 0x01000000, // suppress the GC transition (i.e. during a pinvoke) but a separate GC safe point is required.
GTF_CALL_M_EXP_RUNTIME_LOOKUP = 0x02000000, // this call needs to be transformed into CFG for the dynamic dictionary expansion feature.
GTF_CALL_M_STRESS_TAILCALL = 0x04000000, // the call is NOT "tail" prefixed but GTF_CALL_M_EXPLICIT_TAILCALL was added because of tail call stress mode
GTF_CALL_M_EXPANDED_EARLY = 0x08000000, // the Virtual Call target address is expanded and placed in gtControlExpr in Morph rather than in Lower
GTF_CALL_M_LATE_DEVIRT = 0x10000000, // this call has late devirtualization info
};
inline constexpr GenTreeCallFlags operator ~(GenTreeCallFlags a)
{
return (GenTreeCallFlags)(~(unsigned int)a);
}
inline constexpr GenTreeCallFlags operator |(GenTreeCallFlags a, GenTreeCallFlags b)
{
return (GenTreeCallFlags)((unsigned int)a | (unsigned int)b);
}
inline constexpr GenTreeCallFlags operator &(GenTreeCallFlags a, GenTreeCallFlags b)
{
return (GenTreeCallFlags)((unsigned int)a & (unsigned int)b);
}
inline GenTreeCallFlags& operator |=(GenTreeCallFlags& a, GenTreeCallFlags b)
{
return a = (GenTreeCallFlags)((unsigned int)a | (unsigned int)b);
}
inline GenTreeCallFlags& operator &=(GenTreeCallFlags& a, GenTreeCallFlags b)
{
return a = (GenTreeCallFlags)((unsigned int)a & (unsigned int)b);
}
// clang-format on
// Return type descriptor of a GT_CALL node.
// x64 Unix, Arm64, Arm32 and x86 allow a value to be returned in multiple
// registers. For such calls this struct provides the following info
// on their return type
// - type of value returned in each return register
// - ABI return register numbers in which the value is returned
// - count of return registers in which the value is returned
//
// TODO-ARM: Update this to meet the needs of Arm64 and Arm32
//
// TODO-AllArch: Right now it is used for describing multi-reg returned types.
// Eventually we would want to use it for describing even single-reg
// returned types (e.g. structs returned in single register x64/arm).
// This would allow us not to lie or normalize single struct return
// values in importer/morph.
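// Illustrative sketch: a typical consumer initializes the descriptor once and then queries
// the per-register types, e.g.
//
//     ReturnTypeDesc retDesc;
//     retDesc.InitializeStructReturnType(comp, retClsHnd, callConv); // "comp", "retClsHnd", "callConv" assumed
//     for (unsigned i = 0; i < retDesc.GetReturnRegCount(); i++)
//     {
//         var_types regType = retDesc.GetReturnRegType(i);
//     }
//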
struct ReturnTypeDesc
{
private:
var_types m_regType[MAX_RET_REG_COUNT];
bool m_isEnclosingType;
#ifdef DEBUG
bool m_inited;
#endif
public:
ReturnTypeDesc()
{
Reset();
}
// Initialize the Return Type Descriptor for a method that returns a struct type
void InitializeStructReturnType(Compiler* comp, CORINFO_CLASS_HANDLE retClsHnd, CorInfoCallConvExtension callConv);
// Initialize the Return Type Descriptor for a method that returns a TYP_LONG
// Only needed for X86 and arm32.
void InitializeLongReturnType();
// Reset type descriptor to defaults
void Reset()
{
for (unsigned i = 0; i < MAX_RET_REG_COUNT; ++i)
{
m_regType[i] = TYP_UNKNOWN;
}
m_isEnclosingType = false;
#ifdef DEBUG
m_inited = false;
#endif
}
#ifdef DEBUG
// NOTE: we only use this function when writing out IR dumps. These dumps may take place before the ReturnTypeDesc
// has been initialized.
unsigned TryGetReturnRegCount() const
{
return m_inited ? GetReturnRegCount() : 0;
}
#endif // DEBUG
//--------------------------------------------------------------------------------------------
// GetReturnRegCount: Get the count of return registers in which the return value is returned.
//
// Arguments:
// None
//
// Return Value:
// Count of return registers.
// Returns 0 if the return type is not returned in registers.
unsigned GetReturnRegCount() const
{
assert(m_inited);
int regCount = 0;
for (unsigned i = 0; i < MAX_RET_REG_COUNT; ++i)
{
if (m_regType[i] == TYP_UNKNOWN)
{
break;
}
// otherwise
regCount++;
}
#ifdef DEBUG
// Any remaining elements in m_regTypes[] should also be TYP_UNKNOWN
for (unsigned i = regCount + 1; i < MAX_RET_REG_COUNT; ++i)
{
assert(m_regType[i] == TYP_UNKNOWN);
}
#endif
return regCount;
}
//-----------------------------------------------------------------------
// IsMultiRegRetType: check whether the type is returned in multiple
// return registers.
//
// Arguments:
// None
//
// Return Value:
// Returns true if the type is returned in multiple return registers.
// False otherwise.
// Note that we only have to examine the first two values to determine this
//
bool IsMultiRegRetType() const
{
if (MAX_RET_REG_COUNT < 2)
{
return false;
}
else
{
assert(m_inited);
return ((m_regType[0] != TYP_UNKNOWN) && (m_regType[1] != TYP_UNKNOWN));
}
}
//--------------------------------------------------------------------------
// GetReturnRegType: Get var_type of the return register specified by index.
//
// Arguments:
// index - Index of the return register.
// First return register will have an index 0 and so on.
//
// Return Value:
// var_type of the return register specified by its index.
// asserts if the index does not have a valid register return type.
var_types GetReturnRegType(unsigned index) const
{
var_types result = m_regType[index];
assert(result != TYP_UNKNOWN);
return result;
}
// True if this value is returned in integer register
// that is larger than the type itself.
bool IsEnclosingType() const
{
return m_isEnclosingType;
}
// Get i'th ABI return register
regNumber GetABIReturnReg(unsigned idx) const;
// Get reg mask of ABI return registers
regMaskTP GetABIReturnRegs() const;
};
class TailCallSiteInfo
{
bool m_isCallvirt : 1;
bool m_isCalli : 1;
CORINFO_SIG_INFO m_sig;
CORINFO_RESOLVED_TOKEN m_token;
public:
// Is the tailcall a callvirt instruction?
bool IsCallvirt()
{
return m_isCallvirt;
}
// Is the tailcall a calli instruction?
bool IsCalli()
{
return m_isCalli;
}
// Get the token of the callee
CORINFO_RESOLVED_TOKEN* GetToken()
{
assert(!IsCalli());
return &m_token;
}
// Get the signature of the callee
CORINFO_SIG_INFO* GetSig()
{
return &m_sig;
}
// Mark the tailcall as a calli with the given signature
void SetCalli(CORINFO_SIG_INFO* sig)
{
m_isCallvirt = false;
m_isCalli = true;
m_sig = *sig;
}
// Mark the tailcall as a callvirt with the given signature and token
void SetCallvirt(CORINFO_SIG_INFO* sig, CORINFO_RESOLVED_TOKEN* token)
{
m_isCallvirt = true;
m_isCalli = false;
m_sig = *sig;
m_token = *token;
}
// Mark the tailcall as a call with the given signature and token
void SetCall(CORINFO_SIG_INFO* sig, CORINFO_RESOLVED_TOKEN* token)
{
m_isCallvirt = false;
m_isCalli = false;
m_sig = *sig;
m_token = *token;
}
};
class fgArgInfo;
enum class NonStandardArgKind : unsigned
{
None,
PInvokeFrame,
PInvokeTarget,
PInvokeCookie,
WrapperDelegateCell,
ShiftLow,
ShiftHigh,
FixedRetBuffer,
VirtualStubCell,
R2RIndirectionCell,
ValidateIndirectCallTarget,
// If changing this enum also change getNonStandardArgKindName and isNonStandardArgAddedLate in fgArgInfo
};
#ifdef DEBUG
const char* getNonStandardArgKindName(NonStandardArgKind kind);
#endif
enum class CFGCallKind
{
ValidateAndCall,
Dispatch,
};
struct GenTreeCall final : public GenTree
{
class Use
{
GenTree* m_node;
Use* m_next;
public:
Use(GenTree* node, Use* next = nullptr) : m_node(node), m_next(next)
{
assert(node != nullptr);
}
GenTree*& NodeRef()
{
return m_node;
}
GenTree* GetNode() const
{
assert(m_node != nullptr);
return m_node;
}
void SetNode(GenTree* node)
{
assert(node != nullptr);
m_node = node;
}
Use*& NextRef()
{
return m_next;
}
Use* GetNext() const
{
return m_next;
}
void SetNext(Use* next)
{
m_next = next;
}
};
class UseIterator
{
Use* m_use;
public:
UseIterator(Use* use) : m_use(use)
{
}
Use& operator*() const
{
return *m_use;
}
Use* operator->() const
{
return m_use;
}
Use* GetUse() const
{
return m_use;
}
UseIterator& operator++()
{
m_use = m_use->GetNext();
return *this;
}
bool operator==(const UseIterator& i) const
{
return m_use == i.m_use;
}
bool operator!=(const UseIterator& i) const
{
return m_use != i.m_use;
}
};
class UseList
{
Use* m_uses;
public:
UseList(Use* uses) : m_uses(uses)
{
}
UseIterator begin() const
{
return UseIterator(m_uses);
}
UseIterator end() const
{
return UseIterator(nullptr);
}
};
Use* gtCallThisArg; // The instance argument ('this' pointer)
Use* gtCallArgs; // The list of arguments in original evaluation order
Use* gtCallLateArgs; // On x86: The register arguments in an optimal order
// On ARM/x64: - also includes any outgoing arg space arguments
// - that were evaluated into a temp LclVar
fgArgInfo* fgArgInfo;
UseList Args()
{
return UseList(gtCallArgs);
}
UseList LateArgs()
{
return UseList(gtCallLateArgs);
}
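// Illustrative sketch (assuming "call" is a GenTreeCall*): the argument lists are typically
// walked with a range-based for over the UseList, e.g.
//
//     for (GenTreeCall::Use& use : call->Args())
//     {
//         GenTree* argNode = use.GetNode();
//     }
//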
#ifdef DEBUG
// Used to register callsites with the EE
CORINFO_SIG_INFO* callSig;
#endif
union {
TailCallSiteInfo* tailCallInfo;
// Only used for unmanaged calls, which cannot be tail-called
CorInfoCallConvExtension unmgdCallConv;
};
#if FEATURE_MULTIREG_RET
// State required to support multi-reg returning call nodes.
//
// TODO-AllArch: enable for all call nodes to unify single-reg and multi-reg returns.
ReturnTypeDesc gtReturnTypeDesc;
// GetRegNum() would always be the first return reg.
// The following array holds the other reg numbers of multi-reg return.
regNumberSmall gtOtherRegs[MAX_RET_REG_COUNT - 1];
MultiRegSpillFlags gtSpillFlags;
#endif // FEATURE_MULTIREG_RET
//-----------------------------------------------------------------------
// GetReturnTypeDesc: get the type descriptor of return value of the call
//
// Arguments:
// None
//
// Returns
// Type descriptor of the value returned by call
//
// TODO-AllArch: enable for all call nodes to unify single-reg and multi-reg returns.
const ReturnTypeDesc* GetReturnTypeDesc() const
{
#if FEATURE_MULTIREG_RET
return &gtReturnTypeDesc;
#else
return nullptr;
#endif
}
void InitializeLongReturnType()
{
#if FEATURE_MULTIREG_RET
gtReturnTypeDesc.InitializeLongReturnType();
#endif
}
void InitializeStructReturnType(Compiler* comp, CORINFO_CLASS_HANDLE retClsHnd, CorInfoCallConvExtension callConv)
{
#if FEATURE_MULTIREG_RET
gtReturnTypeDesc.InitializeStructReturnType(comp, retClsHnd, callConv);
#endif
}
void ResetReturnType()
{
#if FEATURE_MULTIREG_RET
gtReturnTypeDesc.Reset();
#endif
}
//---------------------------------------------------------------------------
// GetRegNumByIdx: get i'th return register allocated to this call node.
//
// Arguments:
// idx - index of the return register
//
// Return Value:
// Return regNumber of i'th return register of call node.
// Returns REG_NA if there is no valid return register for the given index.
//
regNumber GetRegNumByIdx(unsigned idx) const
{
assert(idx < MAX_RET_REG_COUNT);
if (idx == 0)
{
return GetRegNum();
}
#if FEATURE_MULTIREG_RET
return (regNumber)gtOtherRegs[idx - 1];
#else
return REG_NA;
#endif
}
//----------------------------------------------------------------------
// SetRegNumByIdx: set i'th return register of this call node
//
// Arguments:
// reg - reg number
// idx - index of the return register
//
// Return Value:
// None
//
void SetRegNumByIdx(regNumber reg, unsigned idx)
{
assert(idx < MAX_RET_REG_COUNT);
if (idx == 0)
{
SetRegNum(reg);
}
#if FEATURE_MULTIREG_RET
else
{
gtOtherRegs[idx - 1] = (regNumberSmall)reg;
assert(gtOtherRegs[idx - 1] == reg);
}
#else
unreached();
#endif
}
//----------------------------------------------------------------------------
// ClearOtherRegs: clear multi-reg state to indicate no regs are allocated
//
// Arguments:
// None
//
// Return Value:
// None
//
void ClearOtherRegs()
{
#if FEATURE_MULTIREG_RET
for (unsigned i = 0; i < MAX_RET_REG_COUNT - 1; ++i)
{
gtOtherRegs[i] = REG_NA;
}
#endif
}
//----------------------------------------------------------------------------
// CopyOtherRegs: copy multi-reg state from the given call node to this node
//
// Arguments:
// fromCall - GenTreeCall node from which to copy multi-reg state
//
// Return Value:
// None
//
void CopyOtherRegs(GenTreeCall* fromCall)
{
#if FEATURE_MULTIREG_RET
for (unsigned i = 0; i < MAX_RET_REG_COUNT - 1; ++i)
{
this->gtOtherRegs[i] = fromCall->gtOtherRegs[i];
}
#endif
}
// Get reg mask of all the valid registers of gtOtherRegs array
regMaskTP GetOtherRegMask() const;
GenTreeFlags GetRegSpillFlagByIdx(unsigned idx) const
{
#if FEATURE_MULTIREG_RET
return GetMultiRegSpillFlagsByIdx(gtSpillFlags, idx);
#else
assert(!"unreached");
return GTF_EMPTY;
#endif
}
void SetRegSpillFlagByIdx(GenTreeFlags flags, unsigned idx)
{
#if FEATURE_MULTIREG_RET
gtSpillFlags = SetMultiRegSpillFlagsByIdx(gtSpillFlags, flags, idx);
#endif
}
//-------------------------------------------------------------------
// ClearOtherRegFlags: clear GTF_* flags associated with gtOtherRegs
//
// Arguments:
// None
//
// Return Value:
// None
void ClearOtherRegFlags()
{
#if FEATURE_MULTIREG_RET
gtSpillFlags = 0;
#endif
}
//-------------------------------------------------------------------------
// CopyOtherRegFlags: copy GTF_* flags associated with gtOtherRegs from
// the given call node.
//
// Arguments:
// fromCall - GenTreeCall node from which to copy
//
// Return Value:
// None
//
void CopyOtherRegFlags(GenTreeCall* fromCall)
{
#if FEATURE_MULTIREG_RET
this->gtSpillFlags = fromCall->gtSpillFlags;
#endif
}
bool IsUnmanaged() const
{
return (gtFlags & GTF_CALL_UNMANAGED) != 0;
}
bool NeedsNullCheck() const
{
return (gtFlags & GTF_CALL_NULLCHECK) != 0;
}
bool CallerPop() const
{
return (gtFlags & GTF_CALL_POP_ARGS) != 0;
}
bool IsVirtual() const
{
return (gtFlags & GTF_CALL_VIRT_KIND_MASK) != GTF_CALL_NONVIRT;
}
bool IsVirtualStub() const
{
return (gtFlags & GTF_CALL_VIRT_KIND_MASK) == GTF_CALL_VIRT_STUB;
}
bool IsVirtualVtable() const
{
return (gtFlags & GTF_CALL_VIRT_KIND_MASK) == GTF_CALL_VIRT_VTABLE;
}
bool IsInlineCandidate() const
{
return (gtFlags & GTF_CALL_INLINE_CANDIDATE) != 0;
}
bool IsR2ROrVirtualStubRelativeIndir()
{
#if defined(FEATURE_READYTORUN)
if (IsR2RRelativeIndir())
{
return true;
}
#endif
return IsVirtualStubRelativeIndir();
}
bool HasNonStandardAddedArgs(Compiler* compiler) const;
int GetNonStandardAddedArgCount(Compiler* compiler) const;
// Returns true if this call uses a retBuf argument.
bool HasRetBufArg() const
{
return (gtCallMoreFlags & GTF_CALL_M_RETBUFFARG) != 0;
}
//-------------------------------------------------------------------------
// TreatAsHasRetBufArg:
//
// Arguments:
// compiler, the compiler instance so that we can call eeGetHelperNum
//
// Return Value:
// Returns true if we treat the call as if it has a retBuf argument
// This method may actually have a retBuf argument
// or it could be a JIT helper that we are still transforming during
// the importer phase.
//
// Notes:
// On ARM64 marking the method with the GTF_CALL_M_RETBUFFARG flag
// will make HasRetBufArg() return true, but will also force the
// use of register x8 to pass the RetBuf argument.
//
bool TreatAsHasRetBufArg(Compiler* compiler) const;
bool HasFixedRetBufArg() const
{
if (!(hasFixedRetBuffReg() && HasRetBufArg()))
{
return false;
}
#if !defined(TARGET_ARM)
return !TargetOS::IsWindows || !callConvIsInstanceMethodCallConv(GetUnmanagedCallConv());
#else
return true;
#endif
}
//-----------------------------------------------------------------------------------------
// HasMultiRegRetVal: whether the call node returns its value in multiple return registers.
//
// Arguments:
// None
//
// Return Value:
// True if the call is returning a multi-reg return value. False otherwise.
//
bool HasMultiRegRetVal() const
{
#ifdef FEATURE_MULTIREG_RET
#if defined(TARGET_X86) || defined(TARGET_ARM)
if (varTypeIsLong(gtType))
{
return true;
}
#endif
if (!varTypeIsStruct(gtType) || HasRetBufArg())
{
return false;
}
// Now it is a struct that is returned in registers.
return GetReturnTypeDesc()->IsMultiRegRetType();
#else // !FEATURE_MULTIREG_RET
return false;
#endif // !FEATURE_MULTIREG_RET
}
// Returns true if VM has flagged this method as CORINFO_FLG_PINVOKE.
bool IsPInvoke() const
{
return (gtCallMoreFlags & GTF_CALL_M_PINVOKE) != 0;
}
// Note that the distinction between a tail-prefixed call and an implicit tail call
// is maintained on a call node until fgMorphCall(), after which it will be
// either a tail call (i.e. IsTailCall() is true) or a non-tail call.
bool IsTailPrefixedCall() const
{
return (gtCallMoreFlags & GTF_CALL_M_EXPLICIT_TAILCALL) != 0;
}
// Returns true if this call didn't have an explicit tail. prefix in the IL
// but was marked as an explicit tail call because of tail call stress mode.
bool IsStressTailCall() const
{
return (gtCallMoreFlags & GTF_CALL_M_STRESS_TAILCALL) != 0;
}
// This method returning "true" implies that tail call flowgraph morphing has
// performed final checks and committed to making a tail call.
bool IsTailCall() const
{
return (gtCallMoreFlags & GTF_CALL_M_TAILCALL) != 0;
}
// This method returning "true" implies that the importer has performed tail call checks
// and is providing a hint that this can be converted to a tail call.
bool CanTailCall() const
{
return IsTailPrefixedCall() || IsImplicitTailCall();
}
// Check whether this is a tailcall dispatched via JIT helper. We only use
// this mechanism on x86 as it is faster than our other more general
// tailcall mechanism.
bool IsTailCallViaJitHelper() const
{
#ifdef TARGET_X86
return IsTailCall() && (gtCallMoreFlags & GTF_CALL_M_TAILCALL_VIA_JIT_HELPER);
#else
return false;
#endif
}
#if FEATURE_FASTTAILCALL
bool IsFastTailCall() const
{
#ifdef TARGET_X86
return IsTailCall() && !(gtCallMoreFlags & GTF_CALL_M_TAILCALL_VIA_JIT_HELPER);
#else
return IsTailCall();
#endif
}
#else // !FEATURE_FASTTAILCALL
bool IsFastTailCall() const
{
return false;
}
#endif // !FEATURE_FASTTAILCALL
#if FEATURE_TAILCALL_OPT
// Returns true if this is marked for opportunistic tail calling.
// That is, can be tail called though not explicitly prefixed with "tail" prefix.
bool IsImplicitTailCall() const
{
return (gtCallMoreFlags & GTF_CALL_M_IMPLICIT_TAILCALL) != 0;
}
bool IsTailCallConvertibleToLoop() const
{
return (gtCallMoreFlags & GTF_CALL_M_TAILCALL_TO_LOOP) != 0;
}
#else // !FEATURE_TAILCALL_OPT
bool IsImplicitTailCall() const
{
return false;
}
bool IsTailCallConvertibleToLoop() const
{
return false;
}
#endif // !FEATURE_TAILCALL_OPT
bool NormalizesSmallTypesOnReturn()
{
return GetUnmanagedCallConv() == CorInfoCallConvExtension::Managed;
}
bool IsSameThis() const
{
return (gtCallMoreFlags & GTF_CALL_M_NONVIRT_SAME_THIS) != 0;
}
bool IsDelegateInvoke() const
{
return (gtCallMoreFlags & GTF_CALL_M_DELEGATE_INV) != 0;
}
bool IsVirtualStubRelativeIndir() const
{
return IsVirtualStub() && (gtCallMoreFlags & GTF_CALL_M_VIRTSTUB_REL_INDIRECT) != 0;
}
bool IsR2RRelativeIndir() const
{
#ifdef FEATURE_READYTORUN
return (gtCallMoreFlags & GTF_CALL_M_R2R_REL_INDIRECT) != 0;
#else
return false;
#endif
}
#ifdef FEATURE_READYTORUN
void setEntryPoint(const CORINFO_CONST_LOOKUP& entryPoint)
{
gtEntryPoint = entryPoint;
if (gtEntryPoint.accessType == IAT_PVALUE)
{
gtCallMoreFlags |= GTF_CALL_M_R2R_REL_INDIRECT;
}
}
#endif // FEATURE_READYTORUN
bool IsVarargs() const
{
return (gtCallMoreFlags & GTF_CALL_M_VARARGS) != 0;
}
bool IsNoReturn() const
{
return (gtCallMoreFlags & GTF_CALL_M_DOES_NOT_RETURN) != 0;
}
bool IsFatPointerCandidate() const
{
return (gtCallMoreFlags & GTF_CALL_M_FAT_POINTER_CHECK) != 0;
}
bool IsGuardedDevirtualizationCandidate() const
{
return (gtCallMoreFlags & GTF_CALL_M_GUARDED_DEVIRT) != 0;
}
bool IsPure(Compiler* compiler) const;
bool HasSideEffects(Compiler* compiler, bool ignoreExceptions = false, bool ignoreCctors = false) const;
void ClearFatPointerCandidate()
{
gtCallMoreFlags &= ~GTF_CALL_M_FAT_POINTER_CHECK;
}
void SetFatPointerCandidate()
{
gtCallMoreFlags |= GTF_CALL_M_FAT_POINTER_CHECK;
}
bool IsDevirtualized() const
{
return (gtCallMoreFlags & GTF_CALL_M_DEVIRTUALIZED) != 0;
}
bool IsGuarded() const
{
return (gtCallMoreFlags & GTF_CALL_M_GUARDED) != 0;
}
bool IsUnboxed() const
{
return (gtCallMoreFlags & GTF_CALL_M_UNBOXED) != 0;
}
bool IsSuppressGCTransition() const
{
return (gtCallMoreFlags & GTF_CALL_M_SUPPRESS_GC_TRANSITION) != 0;
}
void ClearGuardedDevirtualizationCandidate()
{
gtCallMoreFlags &= ~GTF_CALL_M_GUARDED_DEVIRT;
}
void SetGuardedDevirtualizationCandidate()
{
gtCallMoreFlags |= GTF_CALL_M_GUARDED_DEVIRT;
}
void SetIsGuarded()
{
gtCallMoreFlags |= GTF_CALL_M_GUARDED;
}
void SetExpRuntimeLookup()
{
gtCallMoreFlags |= GTF_CALL_M_EXP_RUNTIME_LOOKUP;
}
void ClearExpRuntimeLookup()
{
gtCallMoreFlags &= ~GTF_CALL_M_EXP_RUNTIME_LOOKUP;
}
bool IsExpRuntimeLookup() const
{
return (gtCallMoreFlags & GTF_CALL_M_EXP_RUNTIME_LOOKUP) != 0;
}
void SetExpandedEarly()
{
gtCallMoreFlags |= GTF_CALL_M_EXPANDED_EARLY;
}
void ClearExpandedEarly()
{
gtCallMoreFlags &= ~GTF_CALL_M_EXPANDED_EARLY;
}
bool IsExpandedEarly() const
{
return (gtCallMoreFlags & GTF_CALL_M_EXPANDED_EARLY) != 0;
}
//-----------------------------------------------------------------------------------------
// GetIndirectionCellArgKind: Get the kind of indirection cell used by this call.
//
// Arguments:
// None
//
// Return Value:
// The kind (either R2RIndirectionCell or VirtualStubCell),
// or NonStandardArgKind::None if this call does not have an indirection cell.
//
NonStandardArgKind GetIndirectionCellArgKind() const
{
if (IsVirtualStub())
{
return NonStandardArgKind::VirtualStubCell;
}
#if defined(TARGET_ARMARCH)
// For ARM architectures, we always use an indirection cell for R2R calls.
if (IsR2RRelativeIndir())
{
return NonStandardArgKind::R2RIndirectionCell;
}
#elif defined(TARGET_XARCH)
// On XARCH we disassemble it from the call site, except for tailcalls that need the indirection cell.
if (IsR2RRelativeIndir() && IsFastTailCall())
{
return NonStandardArgKind::R2RIndirectionCell;
}
#endif
return NonStandardArgKind::None;
}
CFGCallKind GetCFGCallKind()
{
#if defined(TARGET_AMD64)
// On x64 the dispatcher is more performant, but we cannot use it when
// we need to pass indirection cells as those go into registers that
// are clobbered by the dispatch helper.
bool mayUseDispatcher = GetIndirectionCellArgKind() == NonStandardArgKind::None;
bool shouldUseDispatcher = true;
#elif defined(TARGET_ARM64)
bool mayUseDispatcher = true;
// Branch predictors on ARM64 generally do not handle the dispatcher as
// well as on x64 hardware, so only use the validator by default.
bool shouldUseDispatcher = false;
#else
// Other platforms do not even support the dispatcher.
bool mayUseDispatcher = false;
bool shouldUseDispatcher = false;
#endif
#ifdef DEBUG
switch (JitConfig.JitCFGUseDispatcher())
{
case 0:
shouldUseDispatcher = false;
break;
case 1:
shouldUseDispatcher = true;
break;
default:
break;
}
#endif
return mayUseDispatcher && shouldUseDispatcher ? CFGCallKind::Dispatch : CFGCallKind::ValidateAndCall;
}
void ResetArgInfo();
GenTreeCallFlags gtCallMoreFlags; // in addition to gtFlags
gtCallTypes gtCallType : 3; // value from the gtCallTypes enumeration
var_types gtReturnType : 5; // exact return type
CORINFO_CLASS_HANDLE gtRetClsHnd; // The return type handle of the call if it is a struct; always available
void* gtStubCallStubAddr; // GTF_CALL_VIRT_STUB - these are never inlined
union {
// only used for CALLI unmanaged calls (CT_INDIRECT)
GenTree* gtCallCookie;
// gtInlineCandidateInfo is only used when inlining methods
InlineCandidateInfo* gtInlineCandidateInfo;
GuardedDevirtualizationCandidateInfo* gtGuardedDevirtualizationCandidateInfo;
ClassProfileCandidateInfo* gtClassProfileCandidateInfo;
LateDevirtualizationInfo* gtLateDevirtualizationInfo;
CORINFO_GENERIC_HANDLE compileTimeHelperArgumentHandle; // Used to track type handle argument of dynamic helpers
void* gtDirectCallAddress; // Used to pass direct call address between lower and codegen
};
// expression evaluated after args are placed which determines the control target
GenTree* gtControlExpr;
union {
CORINFO_METHOD_HANDLE gtCallMethHnd; // CT_USER_FUNC or CT_HELPER
GenTree* gtCallAddr; // CT_INDIRECT
};
#ifdef FEATURE_READYTORUN
// Call target lookup info for method call from a Ready To Run module
CORINFO_CONST_LOOKUP gtEntryPoint;
#endif
#if defined(DEBUG) || defined(INLINE_DATA)
// For non-inline candidates, track the first observation
// that blocks candidacy.
InlineObservation gtInlineObservation;
// IL offset of the call wrt its parent method.
IL_OFFSET gtRawILOffset;
// In DEBUG we report even non inline candidates in the inline tree in
// fgNoteNonInlineCandidate. We need to keep around the inline context for
// this as normally it's part of the candidate info.
class InlineContext* gtInlineContext;
#endif // defined(DEBUG) || defined(INLINE_DATA)
bool IsHelperCall() const
{
return gtCallType == CT_HELPER;
}
bool IsHelperCall(CORINFO_METHOD_HANDLE callMethHnd) const
{
return IsHelperCall() && (callMethHnd == gtCallMethHnd);
}
bool IsHelperCall(Compiler* compiler, unsigned helper) const;
void ReplaceCallOperand(GenTree** operandUseEdge, GenTree* replacement);
bool AreArgsComplete() const;
CorInfoCallConvExtension GetUnmanagedCallConv() const
{
return IsUnmanaged() ? unmgdCallConv : CorInfoCallConvExtension::Managed;
}
static bool Equals(GenTreeCall* c1, GenTreeCall* c2);
GenTreeCall(var_types type) : GenTree(GT_CALL, type)
{
fgArgInfo = nullptr;
}
#if DEBUGGABLE_GENTREE
GenTreeCall() : GenTree()
{
}
#endif
};
struct GenTreeCmpXchg : public GenTree
{
GenTree* gtOpLocation;
GenTree* gtOpValue;
GenTree* gtOpComparand;
GenTreeCmpXchg(var_types type, GenTree* loc, GenTree* val, GenTree* comparand)
: GenTree(GT_CMPXCHG, type), gtOpLocation(loc), gtOpValue(val), gtOpComparand(comparand)
{
// There's no reason to do a compare-exchange on a local location, so we'll assume that all of these
// have global effects.
gtFlags |= (GTF_GLOB_REF | GTF_ASG);
// Merge in flags from operands
gtFlags |= gtOpLocation->gtFlags & GTF_ALL_EFFECT;
gtFlags |= gtOpValue->gtFlags & GTF_ALL_EFFECT;
gtFlags |= gtOpComparand->gtFlags & GTF_ALL_EFFECT;
}
#if DEBUGGABLE_GENTREE
GenTreeCmpXchg() : GenTree()
{
}
#endif
};
#if !defined(TARGET_64BIT)
struct GenTreeMultiRegOp : public GenTreeOp
{
regNumber gtOtherReg;
// GTF_SPILL or GTF_SPILLED flag on a multi-reg node indicates that one or
// more of its result regs are in that state. The spill flag of each of the
// return register is stored here. We only need 2 bits per returned register,
// so this is treated as a 2-bit array. No architecture needs more than 8 bits.
MultiRegSpillFlags gtSpillFlags;
GenTreeMultiRegOp(genTreeOps oper, var_types type, GenTree* op1, GenTree* op2)
: GenTreeOp(oper, type, op1, op2), gtOtherReg(REG_NA)
{
ClearOtherRegFlags();
}
unsigned GetRegCount() const
{
return (TypeGet() == TYP_LONG) ? 2 : 1;
}
//---------------------------------------------------------------------------
// GetRegNumByIdx: get i'th register allocated to this struct argument.
//
// Arguments:
// idx - index of the register
//
// Return Value:
// Return regNumber of i'th register of this register argument
//
regNumber GetRegNumByIdx(unsigned idx) const
{
assert(idx < 2);
if (idx == 0)
{
return GetRegNum();
}
return gtOtherReg;
}
GenTreeFlags GetRegSpillFlagByIdx(unsigned idx) const
{
return GetMultiRegSpillFlagsByIdx(gtSpillFlags, idx);
}
void SetRegSpillFlagByIdx(GenTreeFlags flags, unsigned idx)
{
#if FEATURE_MULTIREG_RET
gtSpillFlags = SetMultiRegSpillFlagsByIdx(gtSpillFlags, flags, idx);
#endif
}
//--------------------------------------------------------------------------
// GetRegType: Get var_type of the register specified by index.
//
// Arguments:
// index - Index of the register.
// First register will have an index 0 and so on.
//
// Return Value:
// var_type of the register specified by its index.
//
var_types GetRegType(unsigned index) const
{
assert(index < 2);
// The type of register is usually the same as GenTree type, since GenTreeMultiRegOp usually defines a single
// reg.
// The special case is when we have TYP_LONG, which may be a MUL_LONG, or a DOUBLE arg passed as LONG,
// in which case we need to separate them into int for each index.
var_types result = TypeGet();
if (result == TYP_LONG)
{
result = TYP_INT;
}
return result;
}
//-------------------------------------------------------------------
// ClearOtherRegFlags: clear GTF_* flags associated with gtOtherRegs
//
// Arguments:
// None
//
// Return Value:
// None
//
void ClearOtherRegFlags()
{
gtSpillFlags = 0;
}
#if DEBUGGABLE_GENTREE
GenTreeMultiRegOp() : GenTreeOp()
{
}
#endif
};
#endif // !defined(TARGET_64BIT)
struct GenTreeFptrVal : public GenTree
{
CORINFO_METHOD_HANDLE gtFptrMethod;
bool gtFptrDelegateTarget;
#ifdef FEATURE_READYTORUN
CORINFO_CONST_LOOKUP gtEntryPoint;
#endif
GenTreeFptrVal(var_types type, CORINFO_METHOD_HANDLE meth)
: GenTree(GT_FTN_ADDR, type), gtFptrMethod(meth), gtFptrDelegateTarget(false)
{
#ifdef FEATURE_READYTORUN
gtEntryPoint.addr = nullptr;
gtEntryPoint.accessType = IAT_VALUE;
#endif
}
#if DEBUGGABLE_GENTREE
GenTreeFptrVal() : GenTree()
{
}
#endif
};
/* gtQmark */
struct GenTreeQmark : public GenTreeOp
{
GenTreeQmark(var_types type, GenTree* cond, GenTreeColon* colon) : GenTreeOp(GT_QMARK, type, cond, colon)
{
// These must follow a specific form.
assert((cond != nullptr) && cond->TypeIs(TYP_INT));
assert((colon != nullptr) && colon->OperIs(GT_COLON));
}
#if DEBUGGABLE_GENTREE
GenTreeQmark() : GenTreeOp()
{
}
#endif
};
/* gtIntrinsic -- intrinsic (possibly-binary op [NULL op2 is allowed] with an additional field) */
struct GenTreeIntrinsic : public GenTreeOp
{
NamedIntrinsic gtIntrinsicName;
CORINFO_METHOD_HANDLE gtMethodHandle; // Method handle of the method which is treated as an intrinsic.
#ifdef FEATURE_READYTORUN
// Call target lookup info for method call from a Ready To Run module
CORINFO_CONST_LOOKUP gtEntryPoint;
#endif
GenTreeIntrinsic(var_types type, GenTree* op1, NamedIntrinsic intrinsicName, CORINFO_METHOD_HANDLE methodHandle)
: GenTreeOp(GT_INTRINSIC, type, op1, nullptr), gtIntrinsicName(intrinsicName), gtMethodHandle(methodHandle)
{
assert(intrinsicName != NI_Illegal);
}
GenTreeIntrinsic(
var_types type, GenTree* op1, GenTree* op2, NamedIntrinsic intrinsicName, CORINFO_METHOD_HANDLE methodHandle)
: GenTreeOp(GT_INTRINSIC, type, op1, op2), gtIntrinsicName(intrinsicName), gtMethodHandle(methodHandle)
{
assert(intrinsicName != NI_Illegal);
}
#if DEBUGGABLE_GENTREE
GenTreeIntrinsic() : GenTreeOp()
{
}
#endif
};
// GenTreeMultiOp - a node with a flexible count of operands stored in an array.
// The array can be an inline one, or a dynamic one, or both, with switching
// between them supported. See GenTreeJitIntrinsic for an example of a node
// utilizing GenTreeMultiOp. GTF_REVERSE_OPS is supported for GenTreeMultiOp's
// with two operands.
//
struct GenTreeMultiOp : public GenTree
{
public:
class Iterator
{
protected:
GenTree** m_use;
Iterator(GenTree** use) : m_use(use)
{
}
public:
Iterator& operator++()
{
m_use++;
return *this;
}
bool operator==(const Iterator& other) const
{
return m_use == other.m_use;
}
bool operator!=(const Iterator& other) const
{
return m_use != other.m_use;
}
};
class OperandsIterator final : public Iterator
{
public:
OperandsIterator(GenTree** use) : Iterator(use)
{
}
GenTree* operator*()
{
return *m_use;
}
};
class UseEdgesIterator final : public Iterator
{
public:
UseEdgesIterator(GenTree** use) : Iterator(use)
{
}
GenTree** operator*()
{
return m_use;
}
};
private:
GenTree** m_operands;
protected:
template <unsigned InlineOperandCount, typename... Operands>
GenTreeMultiOp(genTreeOps oper,
var_types type,
CompAllocator allocator,
GenTree* (&inlineOperands)[InlineOperandCount] DEBUGARG(bool largeNode),
Operands... operands)
: GenTree(oper, type DEBUGARG(largeNode))
{
const size_t OperandCount = sizeof...(Operands);
m_operands = (OperandCount <= InlineOperandCount) ? inlineOperands : allocator.allocate<GenTree*>(OperandCount);
// "OperandCount + 1" so that it works well when OperandCount is 0.
GenTree* operandsArray[OperandCount + 1]{operands...};
InitializeOperands(operandsArray, OperandCount);
}
// Note that this constructor takes the ownership of the "operands" array.
template <unsigned InlineOperandCount>
GenTreeMultiOp(genTreeOps oper,
var_types type,
GenTree** operands,
size_t operandCount,
GenTree* (&inlineOperands)[InlineOperandCount] DEBUGARG(bool largeNode))
: GenTree(oper, type DEBUGARG(largeNode))
{
m_operands = (operandCount <= InlineOperandCount) ? inlineOperands : operands;
InitializeOperands(operands, operandCount);
}
public:
#if DEBUGGABLE_GENTREE
GenTreeMultiOp() : GenTree()
{
}
#endif
GenTree*& Op(size_t index)
{
size_t actualIndex = index - 1;
assert(actualIndex < m_operandCount);
assert(m_operands[actualIndex] != nullptr);
return m_operands[actualIndex];
}
GenTree* Op(size_t index) const
{
return const_cast<GenTreeMultiOp*>(this)->Op(index);
}
// Note that unlike the general "Operands" iterator, this specialized version does not respect GTF_REVERSE_OPS.
IteratorPair<OperandsIterator> Operands()
{
return MakeIteratorPair(OperandsIterator(GetOperandArray()),
OperandsIterator(GetOperandArray() + GetOperandCount()));
}
// Note that unlike the general "UseEdges" iterator, this specialized version does not respect GTF_REVERSE_OPS.
IteratorPair<UseEdgesIterator> UseEdges()
{
return MakeIteratorPair(UseEdgesIterator(GetOperandArray()),
UseEdgesIterator(GetOperandArray() + GetOperandCount()));
}
size_t GetOperandCount() const
{
return m_operandCount;
}
GenTree** GetOperandArray(size_t startIndex = 0) const
{
return m_operands + startIndex;
}
protected:
// Reconfigures the operand array, leaving it in a "dirty" state.
void ResetOperandArray(size_t newOperandCount,
Compiler* compiler,
GenTree** inlineOperands,
size_t inlineOperandCount);
static bool OperandsAreEqual(GenTreeMultiOp* op1, GenTreeMultiOp* op2);
private:
void InitializeOperands(GenTree** operands, size_t operandCount);
void SetOperandCount(size_t newOperandCount)
{
assert(FitsIn<uint8_t>(newOperandCount));
m_operandCount = static_cast<uint8_t>(newOperandCount);
}
};
// Helper class used to implement the constructor of GenTreeJitIntrinsic which
// transfers the ownership of the passed-in array to the underlying MultiOp node.
class IntrinsicNodeBuilder final
{
friend struct GenTreeJitIntrinsic;
GenTree** m_operands;
size_t m_operandCount;
GenTree* m_inlineOperands[2];
public:
IntrinsicNodeBuilder(CompAllocator allocator, size_t operandCount) : m_operandCount(operandCount)
{
m_operands =
(operandCount <= ArrLen(m_inlineOperands)) ? m_inlineOperands : allocator.allocate<GenTree*>(operandCount);
#ifdef DEBUG
for (size_t i = 0; i < operandCount; i++)
{
m_operands[i] = nullptr;
}
#endif // DEBUG
}
IntrinsicNodeBuilder(CompAllocator allocator, GenTreeMultiOp* source) : m_operandCount(source->GetOperandCount())
{
m_operands = (m_operandCount <= ArrLen(m_inlineOperands)) ? m_inlineOperands
: allocator.allocate<GenTree*>(m_operandCount);
for (size_t i = 0; i < m_operandCount; i++)
{
m_operands[i] = source->Op(i + 1);
}
}
void AddOperand(size_t index, GenTree* operand)
{
assert(index < m_operandCount);
assert(m_operands[index] == nullptr);
m_operands[index] = operand;
}
GenTree* GetOperand(size_t index) const
{
assert(index < m_operandCount);
assert(m_operands[index] != nullptr);
return m_operands[index];
}
size_t GetOperandCount() const
{
return m_operandCount;
}
private:
GenTree** GetBuiltOperands()
{
#ifdef DEBUG
for (size_t i = 0; i < m_operandCount; i++)
{
assert(m_operands[i] != nullptr);
}
#endif // DEBUG
return m_operands;
}
};
struct GenTreeJitIntrinsic : public GenTreeMultiOp
{
protected:
GenTree* gtInlineOperands[2];
uint16_t gtLayoutNum;
unsigned char gtAuxiliaryJitType; // For intrinsics that need another type (e.g. Avx2.Gather* or SIMD (by element))
regNumberSmall gtOtherReg; // For intrinsics that return 2 registers
unsigned char gtSimdBaseJitType; // SIMD vector base JIT type
unsigned char gtSimdSize; // SIMD vector size in bytes, use 0 for scalar intrinsics
#if defined(FEATURE_SIMD)
union {
SIMDIntrinsicID gtSIMDIntrinsicID; // operation Id
NamedIntrinsic gtHWIntrinsicId;
};
#else
NamedIntrinsic gtHWIntrinsicId;
#endif
public:
unsigned GetLayoutNum() const
{
return gtLayoutNum;
}
void SetLayoutNum(unsigned layoutNum)
{
assert(FitsIn<uint16_t>(layoutNum));
gtLayoutNum = static_cast<uint16_t>(layoutNum);
}
regNumber GetOtherReg() const
{
return (regNumber)gtOtherReg;
}
void SetOtherReg(regNumber reg)
{
gtOtherReg = (regNumberSmall)reg;
assert(gtOtherReg == reg);
}
CorInfoType GetAuxiliaryJitType() const
{
return (CorInfoType)gtAuxiliaryJitType;
}
void SetAuxiliaryJitType(CorInfoType auxiliaryJitType)
{
gtAuxiliaryJitType = (unsigned char)auxiliaryJitType;
assert(gtAuxiliaryJitType == auxiliaryJitType);
}
var_types GetAuxiliaryType() const;
CorInfoType GetSimdBaseJitType() const
{
return (CorInfoType)gtSimdBaseJitType;
}
CorInfoType GetNormalizedSimdBaseJitType() const
{
CorInfoType simdBaseJitType = GetSimdBaseJitType();
switch (simdBaseJitType)
{
case CORINFO_TYPE_NATIVEINT:
{
#ifdef TARGET_64BIT
return CORINFO_TYPE_LONG;
#else
return CORINFO_TYPE_INT;
#endif
}
case CORINFO_TYPE_NATIVEUINT:
{
#ifdef TARGET_64BIT
return CORINFO_TYPE_ULONG;
#else
return CORINFO_TYPE_UINT;
#endif
}
default:
return simdBaseJitType;
}
}
void SetSimdBaseJitType(CorInfoType simdBaseJitType)
{
gtSimdBaseJitType = (unsigned char)simdBaseJitType;
assert(gtSimdBaseJitType == simdBaseJitType);
}
var_types GetSimdBaseType() const;
unsigned char GetSimdSize() const
{
return gtSimdSize;
}
void SetSimdSize(unsigned simdSize)
{
gtSimdSize = (unsigned char)simdSize;
assert(gtSimdSize == simdSize);
}
template <typename... Operands>
GenTreeJitIntrinsic(genTreeOps oper,
var_types type,
CompAllocator allocator,
CorInfoType simdBaseJitType,
unsigned simdSize,
Operands... operands)
: GenTreeMultiOp(oper, type, allocator, gtInlineOperands DEBUGARG(false), operands...)
, gtLayoutNum(0)
, gtAuxiliaryJitType(CORINFO_TYPE_UNDEF)
, gtOtherReg(REG_NA)
, gtSimdBaseJitType((unsigned char)simdBaseJitType)
, gtSimdSize((unsigned char)simdSize)
, gtHWIntrinsicId(NI_Illegal)
{
assert(gtSimdBaseJitType == simdBaseJitType);
assert(gtSimdSize == simdSize);
}
#if DEBUGGABLE_GENTREE
GenTreeJitIntrinsic() : GenTreeMultiOp()
{
}
#endif
protected:
GenTreeJitIntrinsic(genTreeOps oper,
var_types type,
IntrinsicNodeBuilder&& nodeBuilder,
CorInfoType simdBaseJitType,
unsigned simdSize)
: GenTreeMultiOp(oper,
type,
nodeBuilder.GetBuiltOperands(),
nodeBuilder.GetOperandCount(),
gtInlineOperands DEBUGARG(false))
, gtLayoutNum(0)
, gtAuxiliaryJitType(CORINFO_TYPE_UNDEF)
, gtOtherReg(REG_NA)
, gtSimdBaseJitType((unsigned char)simdBaseJitType)
, gtSimdSize((unsigned char)simdSize)
, gtHWIntrinsicId(NI_Illegal)
{
assert(gtSimdBaseJitType == simdBaseJitType);
assert(gtSimdSize == simdSize);
}
public:
bool isSIMD() const
{
return gtSimdSize != 0;
}
};
#ifdef FEATURE_SIMD
/* gtSIMD -- SIMD intrinsic (possibly-binary op [NULL op2 is allowed] with additional fields) */
struct GenTreeSIMD : public GenTreeJitIntrinsic
{
GenTreeSIMD(var_types type,
IntrinsicNodeBuilder&& nodeBuilder,
SIMDIntrinsicID simdIntrinsicID,
CorInfoType simdBaseJitType,
unsigned simdSize)
: GenTreeJitIntrinsic(GT_SIMD, type, std::move(nodeBuilder), simdBaseJitType, simdSize)
{
gtSIMDIntrinsicID = simdIntrinsicID;
}
GenTreeSIMD(var_types type,
CompAllocator allocator,
GenTree* op1,
SIMDIntrinsicID simdIntrinsicID,
CorInfoType simdBaseJitType,
unsigned simdSize)
: GenTreeJitIntrinsic(GT_SIMD, type, allocator, simdBaseJitType, simdSize, op1)
{
gtSIMDIntrinsicID = simdIntrinsicID;
}
GenTreeSIMD(var_types type,
CompAllocator allocator,
GenTree* op1,
GenTree* op2,
SIMDIntrinsicID simdIntrinsicID,
CorInfoType simdBaseJitType,
unsigned simdSize)
: GenTreeJitIntrinsic(GT_SIMD, type, allocator, simdBaseJitType, simdSize, op1, op2)
{
gtSIMDIntrinsicID = simdIntrinsicID;
}
#if DEBUGGABLE_GENTREE
GenTreeSIMD() : GenTreeJitIntrinsic()
{
}
#endif
bool OperIsMemoryLoad() const; // Returns true for the SIMD Intrinsic instructions that have MemoryLoad semantics,
// false otherwise
SIMDIntrinsicID GetSIMDIntrinsicId() const
{
return gtSIMDIntrinsicID;
}
static bool Equals(GenTreeSIMD* op1, GenTreeSIMD* op2);
};
#endif // FEATURE_SIMD
#ifdef FEATURE_HW_INTRINSICS
struct GenTreeHWIntrinsic : public GenTreeJitIntrinsic
{
GenTreeHWIntrinsic(var_types type,
IntrinsicNodeBuilder&& nodeBuilder,
NamedIntrinsic hwIntrinsicID,
CorInfoType simdBaseJitType,
unsigned simdSize,
bool isSimdAsHWIntrinsic)
: GenTreeJitIntrinsic(GT_HWINTRINSIC, type, std::move(nodeBuilder), simdBaseJitType, simdSize)
{
SetHWIntrinsicId(hwIntrinsicID);
if (OperIsMemoryStore())
{
gtFlags |= (GTF_GLOB_REF | GTF_ASG);
}
if (isSimdAsHWIntrinsic)
{
gtFlags |= GTF_SIMDASHW_OP;
}
}
template <typename... Operands>
GenTreeHWIntrinsic(var_types type,
CompAllocator allocator,
NamedIntrinsic hwIntrinsicID,
CorInfoType simdBaseJitType,
unsigned simdSize,
bool isSimdAsHWIntrinsic,
Operands... operands)
: GenTreeJitIntrinsic(GT_HWINTRINSIC, type, allocator, simdBaseJitType, simdSize, operands...)
{
SetHWIntrinsicId(hwIntrinsicID);
if ((sizeof...(Operands) > 0) && OperIsMemoryStore())
{
gtFlags |= (GTF_GLOB_REF | GTF_ASG);
}
if (isSimdAsHWIntrinsic)
{
gtFlags |= GTF_SIMDASHW_OP;
}
}
#if DEBUGGABLE_GENTREE
GenTreeHWIntrinsic() : GenTreeJitIntrinsic()
{
}
#endif
bool OperIsMemoryLoad() const; // Returns true for the HW Intrinsic instructions that have MemoryLoad semantics,
// false otherwise
bool OperIsMemoryStore() const; // Returns true for the HW Intrinsic instructions that have MemoryStore semantics,
// false otherwise
bool OperIsMemoryLoadOrStore() const; // Returns true for the HW Intrinsic instructions that have MemoryLoad or
// MemoryStore semantics, false otherwise
bool IsSimdAsHWIntrinsic() const
{
return (gtFlags & GTF_SIMDASHW_OP) != 0;
}
unsigned GetResultOpNumForFMA(GenTree* use, GenTree* op1, GenTree* op2, GenTree* op3);
NamedIntrinsic GetHWIntrinsicId() const;
//---------------------------------------------------------------------------------------
// ChangeHWIntrinsicId: Change the intrinsic id for this node.
//
// This method just sets the intrinsic id, asserting that the new intrinsic
// has the same number of operands as the old one, optionally setting some of
// the new operands. Intrinsics with an unknown number of operands are exempt
// from the "do I have the same number of operands" check however, so this method must
// be used with care. Use "ResetHWIntrinsicId" if you need to fully reconfigure
// the node for a different intrinsic, with a possibly different number of operands.
//
// Arguments:
// intrinsicId - the new intrinsic id for the node
// operands - optional operands to set while changing the id
//
// Notes:
// It is the caller's responsibility to update side effect flags.
//
template <typename... Operands>
void ChangeHWIntrinsicId(NamedIntrinsic intrinsicId, Operands... operands)
{
const size_t OperandCount = sizeof...(Operands);
assert(OperandCount <= GetOperandCount());
SetHWIntrinsicId(intrinsicId);
GenTree* operandsArray[OperandCount + 1]{operands...};
GenTree** operandsStore = GetOperandArray();
for (size_t i = 0; i < OperandCount; i++)
{
operandsStore[i] = operandsArray[i];
}
}
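// Illustrative usage sketch (hypothetical intrinsic ids, not from the original source):
// a two-operand node being rewritten to another two-operand intrinsic during lowering
// could be updated in place with
//     node->ChangeHWIntrinsicId(NI_AdvSimd_Subtract, op1, op2);
// leaving the operand count unchanged; the caller remains responsible for side effect flags.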
//---------------------------------------------------------------------------------------
// ResetHWIntrinsicId: Reset the intrinsic id for this node.
//
// This method resets the intrinsic id, fully reconfiguring the node. It must
// be supplied with all the operands the new node needs, and can allocate a
// new dynamic array if the operands do not fit into an inline one, in which
// case a compiler argument is used to get the memory allocator.
//
// This method is similar to "ChangeHWIntrinsicId" but is more versatile and
// thus more expensive. Use it when you need to bash to an intrinsic id with
// a different number of operands than what the original node had, or, which
// is equivalent, when you do not know the original number of operands.
//
// Arguments:
// intrinsicId - the new intrinsic id for the node
// compiler - compiler to allocate memory with, can be "nullptr" if the
// number of new operands does not exceed the length of the
// inline array (so, there are 2 or fewer of them)
// operands - *all* operands for the new node
//
// Notes:
// It is the caller's responsibility to update side effect flags.
//
template <typename... Operands>
void ResetHWIntrinsicId(NamedIntrinsic intrinsicId, Compiler* compiler, Operands... operands)
{
const size_t NewOperandCount = sizeof...(Operands);
assert((compiler != nullptr) || (NewOperandCount <= ArrLen(gtInlineOperands)));
ResetOperandArray(NewOperandCount, compiler, gtInlineOperands, ArrLen(gtInlineOperands));
ChangeHWIntrinsicId(intrinsicId, operands...);
}
void ResetHWIntrinsicId(NamedIntrinsic intrinsicId, GenTree* op1, GenTree* op2)
{
ResetHWIntrinsicId(intrinsicId, static_cast<Compiler*>(nullptr), op1, op2);
}
void ResetHWIntrinsicId(NamedIntrinsic intrinsicId, GenTree* op1)
{
ResetHWIntrinsicId(intrinsicId, static_cast<Compiler*>(nullptr), op1);
}
void ResetHWIntrinsicId(NamedIntrinsic intrinsicId)
{
ResetHWIntrinsicId(intrinsicId, static_cast<Compiler*>(nullptr));
}
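// Illustrative sketch (hypothetical intrinsic id, not from the original source): bashing a
// three-operand node down to a single-operand intrinsic fits the inline operand array, so
// no allocator is needed:
//     node->ResetHWIntrinsicId(NI_Vector128_ToScalar, op1);
// Growing to more than two operands would instead require passing the compiler so a
// dynamic operand array can be allocated.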
static bool Equals(GenTreeHWIntrinsic* op1, GenTreeHWIntrinsic* op2);
private:
void SetHWIntrinsicId(NamedIntrinsic intrinsicId);
};
#endif // FEATURE_HW_INTRINSICS
/* gtIndex -- array access */
struct GenTreeIndex : public GenTreeOp
{
GenTree*& Arr()
{
return gtOp1;
}
GenTree*& Index()
{
return gtOp2;
}
unsigned gtIndElemSize; // size of elements in the array
CORINFO_CLASS_HANDLE gtStructElemClass; // If the element type is a struct, this is the struct type.
GenTreeIndex(var_types type, GenTree* arr, GenTree* ind, unsigned indElemSize)
: GenTreeOp(GT_INDEX, type, arr, ind)
, gtIndElemSize(indElemSize)
, gtStructElemClass(nullptr) // We always initialize this after construction.
{
#ifdef DEBUG
if (JitConfig.JitSkipArrayBoundCheck() == 1)
{
// Skip bounds check
}
else
#endif
{
// Do bounds check
gtFlags |= GTF_INX_RNGCHK;
}
gtFlags |= GTF_EXCEPT | GTF_GLOB_REF;
}
#if DEBUGGABLE_GENTREE
GenTreeIndex() : GenTreeOp()
{
}
#endif
};
// gtIndexAddr: given an array object and an index, checks that the index is within the bounds of the array if
// necessary and produces the address of the value at that index of the array.
struct GenTreeIndexAddr : public GenTreeOp
{
GenTree*& Arr()
{
return gtOp1;
}
GenTree*& Index()
{
return gtOp2;
}
CORINFO_CLASS_HANDLE gtStructElemClass; // If the element type is a struct, this is the struct type.
BasicBlock* gtIndRngFailBB; // Basic block to jump to for array-index-out-of-range
var_types gtElemType; // The element type of the array.
unsigned gtElemSize; // size of elements in the array
unsigned gtLenOffset; // The offset from the array's base address to its length.
unsigned gtElemOffset; // The offset from the array's base address to its first element.
GenTreeIndexAddr(GenTree* arr,
GenTree* ind,
var_types elemType,
CORINFO_CLASS_HANDLE structElemClass,
unsigned elemSize,
unsigned lenOffset,
unsigned elemOffset)
: GenTreeOp(GT_INDEX_ADDR, TYP_BYREF, arr, ind)
, gtStructElemClass(structElemClass)
, gtIndRngFailBB(nullptr)
, gtElemType(elemType)
, gtElemSize(elemSize)
, gtLenOffset(lenOffset)
, gtElemOffset(elemOffset)
{
#ifdef DEBUG
if (JitConfig.JitSkipArrayBoundCheck() == 1)
{
// Skip bounds check
}
else
#endif
{
// Do bounds check
gtFlags |= GTF_INX_RNGCHK;
}
gtFlags |= GTF_EXCEPT | GTF_GLOB_REF;
}
#if DEBUGGABLE_GENTREE
GenTreeIndexAddr() : GenTreeOp()
{
}
#endif
};
/* gtArrLen -- array length (GT_ARR_LENGTH)
GT_ARR_LENGTH is used for "arr.length" */
struct GenTreeArrLen : public GenTreeUnOp
{
GenTree*& ArrRef()
{
return gtOp1;
} // the array address node
private:
int gtArrLenOffset; // constant to add to "gtArrRef" to get the address of the array length.
public:
inline int ArrLenOffset()
{
return gtArrLenOffset;
}
GenTreeArrLen(var_types type, GenTree* arrRef, int lenOffset)
: GenTreeUnOp(GT_ARR_LENGTH, type, arrRef), gtArrLenOffset(lenOffset)
{
}
#if DEBUGGABLE_GENTREE
GenTreeArrLen() : GenTreeUnOp()
{
}
#endif
};
// This takes:
// - a length value
// - an index value
// - the label to jump to if the index is out of range
// - the "kind" of the throw block to branch to on failure
// It generates no result.
//
struct GenTreeBoundsChk : public GenTreeOp
{
BasicBlock* gtIndRngFailBB; // Basic block to jump to for index-out-of-range
SpecialCodeKind gtThrowKind; // Kind of throw block to branch to on failure
GenTreeBoundsChk(GenTree* index, GenTree* length, SpecialCodeKind kind)
: GenTreeOp(GT_BOUNDS_CHECK, TYP_VOID, index, length), gtIndRngFailBB(nullptr), gtThrowKind(kind)
{
gtFlags |= GTF_EXCEPT;
}
#if DEBUGGABLE_GENTREE
GenTreeBoundsChk() : GenTreeOp()
{
}
#endif
// If this check is against GT_ARR_LENGTH, returns array reference, else "NULL".
GenTree* GetArray() const
{
return GetArrayLength()->OperIs(GT_ARR_LENGTH) ? GetArrayLength()->AsArrLen()->ArrRef() : nullptr;
}
// The index expression.
GenTree* GetIndex() const
{
return gtOp1;
}
// An expression for the length.
GenTree* GetArrayLength() const
{
return gtOp2;
}
};
// GenTreeArrElem - bounds checked address (byref) of a general array element,
// for multidimensional arrays, or 1-d arrays with non-zero lower bounds.
//
struct GenTreeArrElem : public GenTree
{
GenTree* gtArrObj;
#define GT_ARR_MAX_RANK 3
GenTree* gtArrInds[GT_ARR_MAX_RANK]; // Indices
unsigned char gtArrRank; // Rank of the array
unsigned char gtArrElemSize; // !!! Caution, this is an "unsigned char", it is used only
// on the optimization path of array intrinsics.
// It stores the size of array elements WHEN it can fit
// into an "unsigned char".
// This has caused VSW 571394.
var_types gtArrElemType; // The array element type
// Requires that "inds" is a pointer to an array of "rank" nodes for the indices.
GenTreeArrElem(
var_types type, GenTree* arr, unsigned char rank, unsigned char elemSize, var_types elemType, GenTree** inds)
: GenTree(GT_ARR_ELEM, type), gtArrObj(arr), gtArrRank(rank), gtArrElemSize(elemSize), gtArrElemType(elemType)
{
gtFlags |= (arr->gtFlags & GTF_ALL_EFFECT);
for (unsigned char i = 0; i < rank; i++)
{
gtArrInds[i] = inds[i];
gtFlags |= (inds[i]->gtFlags & GTF_ALL_EFFECT);
}
gtFlags |= GTF_EXCEPT;
}
#if DEBUGGABLE_GENTREE
GenTreeArrElem() : GenTree()
{
}
#endif
};
//--------------------------------------------
//
// GenTreeArrIndex (gtArrIndex): Expression to bounds-check the index for one dimension of a
// multi-dimensional or non-zero-based array, and compute the effective index
// (i.e. subtracting the lower bound).
//
// Notes:
// This node is similar in some ways to GenTreeBoundsChk, which ONLY performs the check.
// The reason that this node incorporates the check into the effective index computation is
// to avoid duplicating the codegen, as the effective index is required to compute the
// offset anyway.
// TODO-CQ: Enable optimization of the lower bound and length by replacing this:
// /--* <arrObj>
// +--* <index0>
// +--* ArrIndex[i, ]
// with something like:
// /--* <arrObj>
// /--* ArrLowerBound[i, ]
// | /--* <arrObj>
// +--* ArrLen[i, ] (either generalize GT_ARR_LENGTH or add a new node)
// +--* <index0>
// +--* ArrIndex[i, ]
// Which could, for example, be optimized to the following when known to be within bounds:
// /--* TempForLowerBoundDim0
// +--* <index0>
// +--* - (GT_SUB)
//
struct GenTreeArrIndex : public GenTreeOp
{
// The array object - may be any expression producing an Array reference, but is likely to be a lclVar.
GenTree*& ArrObj()
{
return gtOp1;
}
// The index expression - may be any integral expression.
GenTree*& IndexExpr()
{
return gtOp2;
}
unsigned char gtCurrDim; // The current dimension
unsigned char gtArrRank; // Rank of the array
var_types gtArrElemType; // The array element type
GenTreeArrIndex(var_types type,
GenTree* arrObj,
GenTree* indexExpr,
unsigned char currDim,
unsigned char arrRank,
var_types elemType)
: GenTreeOp(GT_ARR_INDEX, type, arrObj, indexExpr)
, gtCurrDim(currDim)
, gtArrRank(arrRank)
, gtArrElemType(elemType)
{
gtFlags |= GTF_EXCEPT;
}
#if DEBUGGABLE_GENTREE
protected:
friend GenTree;
// Used only for GenTree::GetVtableForOper()
GenTreeArrIndex() : GenTreeOp()
{
}
#endif
};
//--------------------------------------------
//
// GenTreeArrOffset (gtArrOffset): Expression to compute the accumulated offset for the address
// of an element of a multi-dimensional or non-zero-based array.
//
// Notes:
// The result of this expression is (gtOffset * dimSize) + gtIndex
// where dimSize is the length/stride/size of the dimension, and is obtained from gtArrObj.
// This node is generated in conjunction with the GenTreeArrIndex node, which computes the
// effective index for a single dimension. The sub-trees can be separately optimized, e.g.
// within a loop body where the expression for the 0th dimension may be invariant.
//
// Here is an example of how the tree might look for a two-dimension array reference:
// /--* const 0
// | /--* <arrObj>
// | +--* <index0>
// +--* ArrIndex[i, ]
// +--* <arrObj>
// /--| arrOffs[i, ]
// | +--* <arrObj>
// | +--* <index1>
// +--* ArrIndex[*,j]
// +--* <arrObj>
// /--| arrOffs[*,j]
// TODO-CQ: see comment on GenTreeArrIndex for how its representation may change. When that
// is done, we will also want to replace the <arrObj> argument to arrOffs with the
// ArrLen as for GenTreeArrIndex.
//
struct GenTreeArrOffs : public GenTree
{
GenTree* gtOffset; // The accumulated offset for lower dimensions - must be TYP_I_IMPL, and
// will either be a CSE temp, the constant 0, or another GenTreeArrOffs node.
GenTree* gtIndex; // The effective index for the current dimension - must be non-negative
// and can be any expression (though it is likely to be either a GenTreeArrIndex,
// node, a lclVar, or a constant).
GenTree* gtArrObj; // The array object - may be any expression producing an Array reference,
// but is likely to be a lclVar.
unsigned char gtCurrDim; // The current dimension
unsigned char gtArrRank; // Rank of the array
var_types gtArrElemType; // The array element type
GenTreeArrOffs(var_types type,
GenTree* offset,
GenTree* index,
GenTree* arrObj,
unsigned char currDim,
unsigned char rank,
var_types elemType)
: GenTree(GT_ARR_OFFSET, type)
, gtOffset(offset)
, gtIndex(index)
, gtArrObj(arrObj)
, gtCurrDim(currDim)
, gtArrRank(rank)
, gtArrElemType(elemType)
{
assert(index->gtFlags & GTF_EXCEPT);
gtFlags |= GTF_EXCEPT;
}
#if DEBUGGABLE_GENTREE
GenTreeArrOffs() : GenTree()
{
}
#endif
};
/* gtAddrMode -- Target-specific canonicalized addressing expression (GT_LEA) */
struct GenTreeAddrMode : public GenTreeOp
{
// Address is Base + Index*Scale + Offset.
// These are the legal patterns:
//
// Base // Base != nullptr && Index == nullptr && Scale == 0 && Offset == 0
// Base + Index*Scale // Base != nullptr && Index != nullptr && Scale != 0 && Offset == 0
// Base + Offset // Base != nullptr && Index == nullptr && Scale == 0 && Offset != 0
// Base + Index*Scale + Offset // Base != nullptr && Index != nullptr && Scale != 0 && Offset != 0
// Index*Scale // Base == nullptr && Index != nullptr && Scale > 1 && Offset == 0
// Index*Scale + Offset // Base == nullptr && Index != nullptr && Scale > 1 && Offset != 0
// Offset // Base == nullptr && Index == nullptr && Scale == 0 && Offset != 0
//
// So, for example:
// 1. Base + Index is legal with Scale==1
// 2. If Index is null, Scale should be zero (or uninitialized / unused)
// 3. If Scale==1, then we should have "Base" instead of "Index*Scale", and "Base + Offset" instead of
// "Index*Scale + Offset".
// First operand is base address/pointer
bool HasBase() const
{
return gtOp1 != nullptr;
}
GenTree*& Base()
{
return gtOp1;
}
void SetBase(GenTree* base)
{
gtOp1 = base;
}
// Second operand is scaled index value
bool HasIndex() const
{
return gtOp2 != nullptr;
}
GenTree*& Index()
{
return gtOp2;
}
void SetIndex(GenTree* index)
{
gtOp2 = index;
}
unsigned GetScale() const
{
return gtScale;
}
void SetScale(unsigned scale)
{
gtScale = scale;
}
int Offset()
{
return static_cast<int>(gtOffset);
}
void SetOffset(int offset)
{
gtOffset = offset;
}
unsigned gtScale; // The scale factor
private:
ssize_t gtOffset; // The offset to add
public:
GenTreeAddrMode(var_types type, GenTree* base, GenTree* index, unsigned scale, ssize_t offset)
: GenTreeOp(GT_LEA, type, base, index)
{
assert(base != nullptr || index != nullptr);
gtScale = scale;
gtOffset = offset;
}
#if DEBUGGABLE_GENTREE
protected:
friend GenTree;
// Used only for GenTree::GetVtableForOper()
GenTreeAddrMode() : GenTreeOp()
{
}
#endif
};
// Indir is just an op, no additional data, but some additional abstractions
struct GenTreeIndir : public GenTreeOp
{
// The address for the indirection.
GenTree*& Addr()
{
return gtOp1;
}
void SetAddr(GenTree* addr)
{
assert(addr != nullptr);
assert(addr->TypeIs(TYP_I_IMPL, TYP_BYREF));
gtOp1 = addr;
}
// These methods provide an interface to the address mode (base, index, scale, offset) of the indirection's address.
bool HasBase();
bool HasIndex();
GenTree* Base();
GenTree* Index();
unsigned Scale();
ssize_t Offset();
GenTreeIndir(genTreeOps oper, var_types type, GenTree* addr, GenTree* data) : GenTreeOp(oper, type, addr, data)
{
}
// True if this indirection is a volatile memory operation.
bool IsVolatile() const
{
return (gtFlags & GTF_IND_VOLATILE) != 0;
}
// True if this indirection is an unaligned memory operation.
bool IsUnaligned() const
{
return (gtFlags & GTF_IND_UNALIGNED) != 0;
}
#if DEBUGGABLE_GENTREE
// Used only for GenTree::GetVtableForOper()
GenTreeIndir() : GenTreeOp()
{
}
#else
// Used by XARCH codegen to construct temporary trees to pass to the emitter.
GenTreeIndir() : GenTreeOp(GT_NOP, TYP_UNDEF)
{
}
#endif
};
// gtBlk -- 'block' (GT_BLK, GT_STORE_BLK).
//
// This is the base type for all of the nodes that represent block or struct
// values.
// Since it can be a store, it includes gtBlkOpKind to specify the type of
// code generation that will be used for the block operation.
struct GenTreeBlk : public GenTreeIndir
{
private:
ClassLayout* m_layout;
public:
ClassLayout* GetLayout() const
{
return m_layout;
}
void SetLayout(ClassLayout* layout)
{
assert((layout != nullptr) || OperIs(GT_STORE_DYN_BLK));
m_layout = layout;
}
// The data to be stored (null for GT_BLK)
GenTree*& Data()
{
return gtOp2;
}
void SetData(GenTree* dataNode)
{
gtOp2 = dataNode;
}
// The size of the buffer to be copied.
unsigned Size() const
{
assert((m_layout != nullptr) || OperIs(GT_STORE_DYN_BLK));
return (m_layout != nullptr) ? m_layout->GetSize() : 0;
}
// Instruction selection: during codegen time, what code sequence we will be using
// to encode this operation.
enum
{
BlkOpKindInvalid,
#ifndef TARGET_X86
BlkOpKindHelper,
#endif
#ifdef TARGET_XARCH
BlkOpKindRepInstr,
#endif
BlkOpKindUnroll,
} gtBlkOpKind;
#ifndef JIT32_GCENCODER
bool gtBlkOpGcUnsafe;
#endif
#ifdef TARGET_XARCH
bool IsOnHeapAndContainsReferences()
{
return (m_layout != nullptr) && m_layout->HasGCPtr() && !Addr()->OperIsLocalAddr();
}
#endif
GenTreeBlk(genTreeOps oper, var_types type, GenTree* addr, ClassLayout* layout)
: GenTreeIndir(oper, type, addr, nullptr)
, m_layout(layout)
, gtBlkOpKind(BlkOpKindInvalid)
#ifndef JIT32_GCENCODER
, gtBlkOpGcUnsafe(false)
#endif
{
assert(OperIsBlk(oper));
assert((layout != nullptr) || OperIs(GT_STORE_DYN_BLK));
gtFlags |= (addr->gtFlags & GTF_ALL_EFFECT);
}
GenTreeBlk(genTreeOps oper, var_types type, GenTree* addr, GenTree* data, ClassLayout* layout)
: GenTreeIndir(oper, type, addr, data)
, m_layout(layout)
, gtBlkOpKind(BlkOpKindInvalid)
#ifndef JIT32_GCENCODER
, gtBlkOpGcUnsafe(false)
#endif
{
assert(OperIsBlk(oper));
assert((layout != nullptr) || OperIs(GT_STORE_DYN_BLK));
gtFlags |= (addr->gtFlags & GTF_ALL_EFFECT);
gtFlags |= (data->gtFlags & GTF_ALL_EFFECT);
}
#if DEBUGGABLE_GENTREE
protected:
friend GenTree;
GenTreeBlk() : GenTreeIndir()
{
}
#endif // DEBUGGABLE_GENTREE
};
// gtObj -- 'object' (GT_OBJ).
//
// This node is used for block values that may have GC pointers.
struct GenTreeObj : public GenTreeBlk
{
void Init()
{
// By default, an OBJ is assumed to be a global reference, unless it is local.
GenTreeLclVarCommon* lcl = Addr()->IsLocalAddrExpr();
if ((lcl == nullptr) || ((lcl->gtFlags & GTF_GLOB_EFFECT) != 0))
{
gtFlags |= GTF_GLOB_REF;
}
noway_assert(GetLayout()->GetClassHandle() != NO_CLASS_HANDLE);
}
GenTreeObj(var_types type, GenTree* addr, ClassLayout* layout) : GenTreeBlk(GT_OBJ, type, addr, layout)
{
Init();
}
GenTreeObj(var_types type, GenTree* addr, GenTree* data, ClassLayout* layout)
: GenTreeBlk(GT_STORE_OBJ, type, addr, data, layout)
{
Init();
}
#if DEBUGGABLE_GENTREE
GenTreeObj() : GenTreeBlk()
{
}
#endif
};
// GenTreeStoreDynBlk -- 'dynamic block store' (GT_STORE_DYN_BLK).
//
// This node is used to represent stores that have a dynamic size - the "cpblk" and "initblk"
// IL instructions are implemented with it. Note that such stores assume the input has no GC
// pointers in it, and as such do not ever use write barriers.
//
// The "Data()" member of this node will either be a "dummy" IND(struct) node, for "cpblk", or
// the zero constant/INIT_VAL for "initblk".
//
struct GenTreeStoreDynBlk : public GenTreeBlk
{
public:
GenTree* gtDynamicSize;
GenTreeStoreDynBlk(GenTree* dstAddr, GenTree* data, GenTree* dynamicSize)
: GenTreeBlk(GT_STORE_DYN_BLK, TYP_VOID, dstAddr, data, nullptr), gtDynamicSize(dynamicSize)
{
// Conservatively the 'dstAddr' could be null or point into the global heap.
// Likewise, this is a store and so must be marked with the GTF_ASG flag.
gtFlags |= (GTF_ASG | GTF_EXCEPT | GTF_GLOB_REF);
gtFlags |= (dynamicSize->gtFlags & GTF_ALL_EFFECT);
}
#if DEBUGGABLE_GENTREE
protected:
friend GenTree;
GenTreeStoreDynBlk() : GenTreeBlk()
{
}
#endif // DEBUGGABLE_GENTREE
};
// Read-modify-write status of a RMW memory op rooted at a storeInd
enum RMWStatus
{
STOREIND_RMW_STATUS_UNKNOWN, // RMW status of storeInd unknown
// Default status unless modified by IsRMWMemOpRootedAtStoreInd()
// One of these denote storeind is a RMW memory operation.
STOREIND_RMW_DST_IS_OP1, // StoreInd is known to be a RMW memory op and dst candidate is op1
STOREIND_RMW_DST_IS_OP2, // StoreInd is known to be a RMW memory op and dst candidate is op2
// One of these denote the reason for storeind is marked as non-RMW operation
STOREIND_RMW_UNSUPPORTED_ADDR, // Addr mode is not yet supported for RMW memory
STOREIND_RMW_UNSUPPORTED_OPER, // Operation is not supported for RMW memory
STOREIND_RMW_UNSUPPORTED_TYPE, // Type is not supported for RMW memory
STOREIND_RMW_INDIR_UNEQUAL // Indir to read value is not equivalent to indir that writes the value
};
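// Illustrative sketch (not from the original source): for a store like "*p += 1" on xarch,
// the STOREIND's data tree is ADD(IND(p), 1); when the IND under op1 matches the store
// destination, lowering can record STOREIND_RMW_DST_IS_OP1 so that codegen emits a single
// "add [p], 1" instead of a separate load, add and store.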
#ifdef DEBUG
inline const char* RMWStatusDescription(RMWStatus status)
{
switch (status)
{
case STOREIND_RMW_STATUS_UNKNOWN:
return "RMW status unknown";
case STOREIND_RMW_DST_IS_OP1:
return "dst candidate is op1";
case STOREIND_RMW_DST_IS_OP2:
return "dst candidate is op2";
case STOREIND_RMW_UNSUPPORTED_ADDR:
return "address mode is not supported";
case STOREIND_RMW_UNSUPPORTED_OPER:
return "oper is not supported";
case STOREIND_RMW_UNSUPPORTED_TYPE:
return "type is not supported";
case STOREIND_RMW_INDIR_UNEQUAL:
return "read indir is not equivalent to write indir";
default:
unreached();
}
}
#endif
// StoreInd is just a BinOp, with additional RMW status
struct GenTreeStoreInd : public GenTreeIndir
{
#if !CPU_LOAD_STORE_ARCH
// The below flag is set and used during lowering
RMWStatus gtRMWStatus;
bool IsRMWStatusUnknown()
{
return gtRMWStatus == STOREIND_RMW_STATUS_UNKNOWN;
}
bool IsNonRMWMemoryOp()
{
return gtRMWStatus == STOREIND_RMW_UNSUPPORTED_ADDR || gtRMWStatus == STOREIND_RMW_UNSUPPORTED_OPER ||
gtRMWStatus == STOREIND_RMW_UNSUPPORTED_TYPE || gtRMWStatus == STOREIND_RMW_INDIR_UNEQUAL;
}
bool IsRMWMemoryOp()
{
return gtRMWStatus == STOREIND_RMW_DST_IS_OP1 || gtRMWStatus == STOREIND_RMW_DST_IS_OP2;
}
bool IsRMWDstOp1()
{
return gtRMWStatus == STOREIND_RMW_DST_IS_OP1;
}
bool IsRMWDstOp2()
{
return gtRMWStatus == STOREIND_RMW_DST_IS_OP2;
}
#endif //! CPU_LOAD_STORE_ARCH
RMWStatus GetRMWStatus()
{
#if !CPU_LOAD_STORE_ARCH
return gtRMWStatus;
#else
return STOREIND_RMW_STATUS_UNKNOWN;
#endif
}
void SetRMWStatusDefault()
{
#if !CPU_LOAD_STORE_ARCH
gtRMWStatus = STOREIND_RMW_STATUS_UNKNOWN;
#endif
}
void SetRMWStatus(RMWStatus status)
{
#if !CPU_LOAD_STORE_ARCH
gtRMWStatus = status;
#endif
}
GenTree*& Data()
{
return gtOp2;
}
GenTreeStoreInd(var_types type, GenTree* destPtr, GenTree* data) : GenTreeIndir(GT_STOREIND, type, destPtr, data)
{
SetRMWStatusDefault();
}
#if DEBUGGABLE_GENTREE
protected:
friend GenTree;
// Used only for GenTree::GetVtableForOper()
GenTreeStoreInd() : GenTreeIndir()
{
SetRMWStatusDefault();
}
#endif
};
/* gtRetExp -- Place holder for the return expression from an inline candidate (GT_RET_EXPR) */
struct GenTreeRetExpr : public GenTree
{
GenTree* gtInlineCandidate;
BasicBlockFlags bbFlags;
CORINFO_CLASS_HANDLE gtRetClsHnd;
GenTreeRetExpr(var_types type) : GenTree(GT_RET_EXPR, type)
{
}
#if DEBUGGABLE_GENTREE
GenTreeRetExpr() : GenTree()
{
}
#endif
};
// In LIR there are no longer statements so debug information is inserted linearly using these nodes.
struct GenTreeILOffset : public GenTree
{
DebugInfo gtStmtDI; // debug info
#ifdef DEBUG
IL_OFFSET gtStmtLastILoffs; // instr offset at end of stmt
#endif
GenTreeILOffset(const DebugInfo& di DEBUGARG(IL_OFFSET lastOffset = BAD_IL_OFFSET))
: GenTree(GT_IL_OFFSET, TYP_VOID)
, gtStmtDI(di)
#ifdef DEBUG
, gtStmtLastILoffs(lastOffset)
#endif
{
}
#if DEBUGGABLE_GENTREE
GenTreeILOffset() : GenTree(GT_IL_OFFSET, TYP_VOID)
{
}
#endif
};
// GenTreeList: adapter class for forward iteration of the execution order GenTree linked list
// using range-based `for`, normally used via Statement::TreeList(), e.g.:
// for (GenTree* const tree : stmt->TreeList()) ...
//
class GenTreeList
{
GenTree* m_trees;
// Forward iterator for the execution order GenTree linked list (using `gtNext` pointer).
//
class iterator
{
GenTree* m_tree;
public:
iterator(GenTree* tree) : m_tree(tree)
{
}
GenTree* operator*() const
{
return m_tree;
}
iterator& operator++()
{
m_tree = m_tree->gtNext;
return *this;
}
bool operator!=(const iterator& i) const
{
return m_tree != i.m_tree;
}
};
public:
GenTreeList(GenTree* trees) : m_trees(trees)
{
}
iterator begin() const
{
return iterator(m_trees);
}
iterator end() const
{
return iterator(nullptr);
}
};
// We use the following format when printing the Statement number: Statement->GetID()
// This define is used with string concatenation to put this in printf format strings (Note that %u means unsigned int)
#define FMT_STMT "STMT%05u"
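// For example (illustrative, not from the original source):
//     JITDUMP("Removing " FMT_STMT "\n", stmt->GetID());
// would print something like "Removing STMT00042".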
struct Statement
{
public:
Statement(GenTree* expr DEBUGARG(unsigned stmtID))
: m_rootNode(expr)
, m_treeList(nullptr)
, m_next(nullptr)
, m_prev(nullptr)
#ifdef DEBUG
, m_lastILOffset(BAD_IL_OFFSET)
, m_stmtID(stmtID)
#endif
{
}
GenTree* GetRootNode() const
{
return m_rootNode;
}
GenTree** GetRootNodePointer()
{
return &m_rootNode;
}
void SetRootNode(GenTree* treeRoot)
{
m_rootNode = treeRoot;
}
GenTree* GetTreeList() const
{
return m_treeList;
}
void SetTreeList(GenTree* treeHead)
{
m_treeList = treeHead;
}
// TreeList: convenience method for enabling range-based `for` iteration over the
// execution order of the GenTree linked list, e.g.:
// for (GenTree* const tree : stmt->TreeList()) ...
//
GenTreeList TreeList() const
{
return GenTreeList(GetTreeList());
}
const DebugInfo& GetDebugInfo() const
{
return m_debugInfo;
}
void SetDebugInfo(const DebugInfo& di)
{
m_debugInfo = di;
di.Validate();
}
#ifdef DEBUG
IL_OFFSET GetLastILOffset() const
{
return m_lastILOffset;
}
void SetLastILOffset(IL_OFFSET lastILOffset)
{
m_lastILOffset = lastILOffset;
}
unsigned GetID() const
{
return m_stmtID;
}
#endif // DEBUG
Statement* GetNextStmt() const
{
return m_next;
}
void SetNextStmt(Statement* nextStmt)
{
m_next = nextStmt;
}
Statement* GetPrevStmt() const
{
return m_prev;
}
void SetPrevStmt(Statement* prevStmt)
{
m_prev = prevStmt;
}
bool IsPhiDefnStmt() const
{
return m_rootNode->IsPhiDefn();
}
unsigned char GetCostSz() const
{
return m_rootNode->GetCostSz();
}
unsigned char GetCostEx() const
{
return m_rootNode->GetCostEx();
}
private:
// The root of the expression tree.
// Note: It will be the last node in evaluation order.
GenTree* m_rootNode;
// The tree list head (for forward walks in evaluation order).
// The value is `nullptr` until we have set the sequencing of the nodes.
GenTree* m_treeList;
// The statement nodes are doubly-linked. The first statement node in a block points
// to the last node in the block via its `m_prev` link. Note that the last statement node
// does not point to the first: it has `m_next == nullptr`; that is, the list is not fully circular.
Statement* m_next;
Statement* m_prev;
DebugInfo m_debugInfo;
#ifdef DEBUG
IL_OFFSET m_lastILOffset; // The instr offset at the end of this statement.
unsigned m_stmtID;
#endif
};
// StatementList: adapter class for forward iteration of the statement linked list using range-based `for`,
// normally used via BasicBlock::Statements(), e.g.:
// for (Statement* const stmt : block->Statements()) ...
// or:
// for (Statement* const stmt : block->NonPhiStatements()) ...
//
class StatementList
{
Statement* m_stmts;
// Forward iterator for the statement linked list.
//
class iterator
{
Statement* m_stmt;
public:
iterator(Statement* stmt) : m_stmt(stmt)
{
}
Statement* operator*() const
{
return m_stmt;
}
iterator& operator++()
{
m_stmt = m_stmt->GetNextStmt();
return *this;
}
bool operator!=(const iterator& i) const
{
return m_stmt != i.m_stmt;
}
};
public:
StatementList(Statement* stmts) : m_stmts(stmts)
{
}
iterator begin() const
{
return iterator(m_stmts);
}
iterator end() const
{
return iterator(nullptr);
}
};
/* NOTE: Any tree nodes that are larger than 8 bytes (two ints or
pointers) must be flagged as 'large' in GenTree::InitNodeSize().
*/
/* AsClsVar() -- 'static data member' (GT_CLS_VAR) */
struct GenTreeClsVar : public GenTree
{
CORINFO_FIELD_HANDLE gtClsVarHnd;
FieldSeqNode* gtFieldSeq;
GenTreeClsVar(var_types type, CORINFO_FIELD_HANDLE clsVarHnd, FieldSeqNode* fldSeq)
: GenTree(GT_CLS_VAR, type), gtClsVarHnd(clsVarHnd), gtFieldSeq(fldSeq)
{
gtFlags |= GTF_GLOB_REF;
}
GenTreeClsVar(genTreeOps oper, var_types type, CORINFO_FIELD_HANDLE clsVarHnd, FieldSeqNode* fldSeq)
: GenTree(oper, type), gtClsVarHnd(clsVarHnd), gtFieldSeq(fldSeq)
{
assert((oper == GT_CLS_VAR) || (oper == GT_CLS_VAR_ADDR));
gtFlags |= GTF_GLOB_REF;
}
#if DEBUGGABLE_GENTREE
GenTreeClsVar() : GenTree()
{
}
#endif
};
/* gtArgPlace -- 'register argument placeholder' (GT_ARGPLACE) */
struct GenTreeArgPlace : public GenTree
{
CORINFO_CLASS_HANDLE gtArgPlaceClsHnd; // Needed when we have a TYP_STRUCT argument
GenTreeArgPlace(var_types type, CORINFO_CLASS_HANDLE clsHnd) : GenTree(GT_ARGPLACE, type), gtArgPlaceClsHnd(clsHnd)
{
}
#if DEBUGGABLE_GENTREE
GenTreeArgPlace() : GenTree()
{
}
#endif
};
/* gtPhiArg -- phi node rhs argument, var = phi(phiarg, phiarg, phiarg...); GT_PHI_ARG */
struct GenTreePhiArg : public GenTreeLclVarCommon
{
BasicBlock* gtPredBB;
GenTreePhiArg(var_types type, unsigned lclNum, unsigned ssaNum, BasicBlock* block)
: GenTreeLclVarCommon(GT_PHI_ARG, type, lclNum), gtPredBB(block)
{
SetSsaNum(ssaNum);
}
#if DEBUGGABLE_GENTREE
GenTreePhiArg() : GenTreeLclVarCommon()
{
}
#endif
};
/* gtPutArgStk -- Argument passed on stack (GT_PUTARG_STK) */
struct GenTreePutArgStk : public GenTreeUnOp
{
private:
unsigned m_byteOffset;
#ifdef FEATURE_PUT_STRUCT_ARG_STK
unsigned m_byteSize; // The number of bytes that this argument is occupying on the stack with padding.
#endif
public:
#if defined(DEBUG_ARG_SLOTS)
unsigned gtSlotNum; // Slot number of the argument to be passed on stack
#if defined(FEATURE_PUT_STRUCT_ARG_STK)
unsigned gtNumSlots; // Number of slots for the argument to be passed on stack
#endif
#endif
#if defined(UNIX_X86_ABI)
unsigned gtPadAlign; // Number of padding slots for stack alignment
#endif
#if defined(DEBUG) || defined(UNIX_X86_ABI)
GenTreeCall* gtCall; // the call node to which this argument belongs
#endif
#if FEATURE_FASTTAILCALL
bool gtPutInIncomingArgArea; // Whether this arg needs to be placed in incoming arg area.
// By default this is false and will be placed in out-going arg area.
// Fast tail calls set this to true.
// In future if we need to add more such bool fields consider bit fields.
#endif
#ifdef FEATURE_PUT_STRUCT_ARG_STK
// Instruction selection: during codegen time, what code sequence we will be using
// to encode this operation.
// TODO-Throughput: The following information should be obtained from the child
// block node.
enum class Kind : __int8{
Invalid, RepInstr, PartialRepInstr, Unroll, Push, PushAllSlots,
};
Kind gtPutArgStkKind;
#endif
GenTreePutArgStk(genTreeOps oper,
var_types type,
GenTree* op1,
unsigned stackByteOffset,
#if defined(FEATURE_PUT_STRUCT_ARG_STK)
unsigned stackByteSize,
#endif
#if defined(DEBUG_ARG_SLOTS)
unsigned slotNum,
#if defined(FEATURE_PUT_STRUCT_ARG_STK)
unsigned numSlots,
#endif
#endif
GenTreeCall* callNode,
bool putInIncomingArgArea)
: GenTreeUnOp(oper, type, op1 DEBUGARG(/*largeNode*/ false))
, m_byteOffset(stackByteOffset)
#if defined(FEATURE_PUT_STRUCT_ARG_STK)
, m_byteSize(stackByteSize)
#endif
#if defined(DEBUG_ARG_SLOTS)
, gtSlotNum(slotNum)
#if defined(FEATURE_PUT_STRUCT_ARG_STK)
, gtNumSlots(numSlots)
#endif
#endif
#if defined(UNIX_X86_ABI)
, gtPadAlign(0)
#endif
#if defined(DEBUG) || defined(UNIX_X86_ABI)
, gtCall(callNode)
#endif
#if FEATURE_FASTTAILCALL
, gtPutInIncomingArgArea(putInIncomingArgArea)
#endif // FEATURE_FASTTAILCALL
#if defined(FEATURE_PUT_STRUCT_ARG_STK)
, gtPutArgStkKind(Kind::Invalid)
#endif
{
DEBUG_ARG_SLOTS_ASSERT(m_byteOffset == slotNum * TARGET_POINTER_SIZE);
#if defined(FEATURE_PUT_STRUCT_ARG_STK)
DEBUG_ARG_SLOTS_ASSERT(m_byteSize == gtNumSlots * TARGET_POINTER_SIZE);
#endif
}
GenTree*& Data()
{
return gtOp1;
}
#if FEATURE_FASTTAILCALL
bool putInIncomingArgArea() const
{
return gtPutInIncomingArgArea;
}
#else // !FEATURE_FASTTAILCALL
bool putInIncomingArgArea() const
{
return false;
}
#endif // !FEATURE_FASTTAILCALL
unsigned getArgOffset() const
{
DEBUG_ARG_SLOTS_ASSERT(m_byteOffset / TARGET_POINTER_SIZE == gtSlotNum);
DEBUG_ARG_SLOTS_ASSERT(m_byteOffset % TARGET_POINTER_SIZE == 0);
return m_byteOffset;
}
#if defined(UNIX_X86_ABI)
unsigned getArgPadding() const
{
return gtPadAlign;
}
void setArgPadding(unsigned padAlign)
{
gtPadAlign = padAlign;
}
#endif
#ifdef FEATURE_PUT_STRUCT_ARG_STK
unsigned GetStackByteSize() const
{
return m_byteSize;
}
// Return true if this is a PutArgStk of a SIMD12 struct.
// This is needed because such values are re-typed to SIMD16, and the type of PutArgStk is VOID.
unsigned isSIMD12() const
{
return (varTypeIsSIMD(gtOp1) && (GetStackByteSize() == 12));
}
bool isPushKind() const
{
return (gtPutArgStkKind == Kind::Push) || (gtPutArgStkKind == Kind::PushAllSlots);
}
#else // !FEATURE_PUT_STRUCT_ARG_STK
unsigned GetStackByteSize() const;
#endif // !FEATURE_PUT_STRUCT_ARG_STK
#if DEBUGGABLE_GENTREE
GenTreePutArgStk() : GenTreeUnOp()
{
}
#endif
};
#if FEATURE_ARG_SPLIT
// Represent the struct argument: split value in register(s) and stack
struct GenTreePutArgSplit : public GenTreePutArgStk
{
unsigned gtNumRegs;
GenTreePutArgSplit(GenTree* op1,
unsigned stackByteOffset,
#if defined(FEATURE_PUT_STRUCT_ARG_STK)
unsigned stackByteSize,
#endif
#if defined(DEBUG_ARG_SLOTS)
unsigned slotNum,
#if defined(FEATURE_PUT_STRUCT_ARG_STK)
unsigned numSlots,
#endif
#endif
unsigned numRegs,
GenTreeCall* callNode,
bool putIncomingArgArea)
: GenTreePutArgStk(GT_PUTARG_SPLIT,
TYP_STRUCT,
op1,
stackByteOffset,
#if defined(FEATURE_PUT_STRUCT_ARG_STK)
stackByteSize,
#endif
#if defined(DEBUG_ARG_SLOTS)
slotNum,
#if defined(FEATURE_PUT_STRUCT_ARG_STK)
numSlots,
#endif
#endif
callNode,
putIncomingArgArea)
, gtNumRegs(numRegs)
{
ClearOtherRegs();
ClearOtherRegFlags();
}
// Type required to support multi-reg struct arg.
var_types m_regType[MAX_REG_ARG];
// First reg of struct is always given by GetRegNum().
// gtOtherRegs holds the other reg numbers of struct.
regNumberSmall gtOtherRegs[MAX_REG_ARG - 1];
MultiRegSpillFlags gtSpillFlags;
//---------------------------------------------------------------------------
// GetRegNumByIdx: get i'th register allocated to this struct argument.
//
// Arguments:
// idx - index of the struct
//
// Return Value:
// Return regNumber of i'th register of this struct argument
//
regNumber GetRegNumByIdx(unsigned idx) const
{
assert(idx < MAX_REG_ARG);
if (idx == 0)
{
return GetRegNum();
}
return (regNumber)gtOtherRegs[idx - 1];
}
//----------------------------------------------------------------------
// SetRegNumByIdx: set i'th register of this struct argument
//
// Arguments:
// reg - reg number
// idx - index of the struct
//
// Return Value:
// None
//
void SetRegNumByIdx(regNumber reg, unsigned idx)
{
assert(idx < MAX_REG_ARG);
if (idx == 0)
{
SetRegNum(reg);
}
else
{
gtOtherRegs[idx - 1] = (regNumberSmall)reg;
assert(gtOtherRegs[idx - 1] == reg);
}
}
//----------------------------------------------------------------------------
// ClearOtherRegs: clear multi-reg state to indicate no regs are allocated
//
// Arguments:
// None
//
// Return Value:
// None
//
void ClearOtherRegs()
{
for (unsigned i = 0; i < MAX_REG_ARG - 1; ++i)
{
gtOtherRegs[i] = REG_NA;
}
}
GenTreeFlags GetRegSpillFlagByIdx(unsigned idx) const
{
return GetMultiRegSpillFlagsByIdx(gtSpillFlags, idx);
}
void SetRegSpillFlagByIdx(GenTreeFlags flags, unsigned idx)
{
#if FEATURE_MULTIREG_RET
gtSpillFlags = SetMultiRegSpillFlagsByIdx(gtSpillFlags, flags, idx);
#endif
}
//--------------------------------------------------------------------------
// GetRegType: Get var_type of the register specified by index.
//
// Arguments:
// index - Index of the register.
// First register will have an index 0 and so on.
//
// Return Value:
// var_type of the register specified by its index.
var_types GetRegType(unsigned index) const
{
assert(index < gtNumRegs);
var_types result = m_regType[index];
return result;
}
//-------------------------------------------------------------------
// clearOtherRegFlags: clear GTF_* flags associated with gtOtherRegs
//
// Arguments:
// None
//
// Return Value:
// None
//
void ClearOtherRegFlags()
{
gtSpillFlags = 0;
}
#if DEBUGGABLE_GENTREE
GenTreePutArgSplit() : GenTreePutArgStk()
{
}
#endif
};
#endif // FEATURE_ARG_SPLIT
// Represents GT_COPY or GT_RELOAD node
//
// As it turns out, these are only needed on targets that happen to have multi-reg returns.
// However, they are actually needed on any target that has any multi-reg ops. It is just
// coincidence that those are the same (and there isn't a FEATURE_MULTIREG_OPS).
//
struct GenTreeCopyOrReload : public GenTreeUnOp
{
#if FEATURE_MULTIREG_RET
// State required to support copy/reload of a multi-reg call node.
// The first register is always given by GetRegNum().
//
regNumberSmall gtOtherRegs[MAX_RET_REG_COUNT - 1];
#endif
//----------------------------------------------------------
// ClearOtherRegs: set gtOtherRegs to REG_NA.
//
// Arguments:
// None
//
// Return Value:
// None
//
void ClearOtherRegs()
{
#if FEATURE_MULTIREG_RET
for (unsigned i = 0; i < MAX_RET_REG_COUNT - 1; ++i)
{
gtOtherRegs[i] = REG_NA;
}
#endif
}
//-----------------------------------------------------------
// GetRegNumByIdx: Get regNumber of i'th position.
//
// Arguments:
// idx - register position.
//
// Return Value:
// Returns regNumber assigned to i'th position.
//
regNumber GetRegNumByIdx(unsigned idx) const
{
assert(idx < MAX_RET_REG_COUNT);
if (idx == 0)
{
return GetRegNum();
}
#if FEATURE_MULTIREG_RET
return (regNumber)gtOtherRegs[idx - 1];
#else
return REG_NA;
#endif
}
//-----------------------------------------------------------
// SetRegNumByIdx: Set the regNumber for i'th position.
//
// Arguments:
// reg - reg number
// idx - register position.
//
// Return Value:
// None.
//
void SetRegNumByIdx(regNumber reg, unsigned idx)
{
assert(idx < MAX_RET_REG_COUNT);
if (idx == 0)
{
SetRegNum(reg);
}
#if FEATURE_MULTIREG_RET
else
{
gtOtherRegs[idx - 1] = (regNumberSmall)reg;
assert(gtOtherRegs[idx - 1] == reg);
}
#else
else
{
unreached();
}
#endif
}
//----------------------------------------------------------------------------
// CopyOtherRegs: copy multi-reg state from the given copy/reload node to this
// node.
//
// Arguments:
// from - GenTree node from which to copy multi-reg state
//
// Return Value:
// None
//
// TODO-ARM: Implement this routine for Arm64 and Arm32
// TODO-X86: Implement this routine for x86
void CopyOtherRegs(GenTreeCopyOrReload* from)
{
assert(OperGet() == from->OperGet());
#ifdef UNIX_AMD64_ABI
for (unsigned i = 0; i < MAX_RET_REG_COUNT - 1; ++i)
{
gtOtherRegs[i] = from->gtOtherRegs[i];
}
#endif
}
unsigned GetRegCount() const
{
#if FEATURE_MULTIREG_RET
// We need to return the highest index for which we have a valid register.
// Note that the gtOtherRegs array is off by one (the 0th register is GetRegNum()).
// If there's no valid register in gtOtherRegs, GetRegNum() must be valid.
// Note that for most nodes, the set of valid registers must be contiguous,
// but for COPY or RELOAD there is only a valid register for the register positions
// that must be copied or reloaded.
//
for (unsigned i = MAX_RET_REG_COUNT; i > 1; i--)
{
if (gtOtherRegs[i - 2] != REG_NA)
{
return i;
}
}
#endif
// We should never have a COPY or RELOAD with no valid registers.
assert(GetRegNum() != REG_NA);
return 1;
}
GenTreeCopyOrReload(genTreeOps oper, var_types type, GenTree* op1) : GenTreeUnOp(oper, type, op1)
{
assert(type != TYP_STRUCT || op1->IsMultiRegNode());
SetRegNum(REG_NA);
ClearOtherRegs();
}
#if DEBUGGABLE_GENTREE
GenTreeCopyOrReload() : GenTreeUnOp()
{
}
#endif
};
// Represents GT_ALLOCOBJ node
struct GenTreeAllocObj final : public GenTreeUnOp
{
unsigned int gtNewHelper; // Value returned by ICorJitInfo::getNewHelper
bool gtHelperHasSideEffects;
CORINFO_CLASS_HANDLE gtAllocObjClsHnd;
#ifdef FEATURE_READYTORUN
CORINFO_CONST_LOOKUP gtEntryPoint;
#endif
GenTreeAllocObj(
var_types type, unsigned int helper, bool helperHasSideEffects, CORINFO_CLASS_HANDLE clsHnd, GenTree* op)
: GenTreeUnOp(GT_ALLOCOBJ, type, op DEBUGARG(/*largeNode*/ TRUE))
, // This node in most cases will be changed to a call node
gtNewHelper(helper)
, gtHelperHasSideEffects(helperHasSideEffects)
, gtAllocObjClsHnd(clsHnd)
{
#ifdef FEATURE_READYTORUN
gtEntryPoint.addr = nullptr;
#endif
}
#if DEBUGGABLE_GENTREE
GenTreeAllocObj() : GenTreeUnOp()
{
}
#endif
};
// Represents GT_RUNTIMELOOKUP node
struct GenTreeRuntimeLookup final : public GenTreeUnOp
{
CORINFO_GENERIC_HANDLE gtHnd;
CorInfoGenericHandleType gtHndType;
GenTreeRuntimeLookup(CORINFO_GENERIC_HANDLE hnd, CorInfoGenericHandleType hndTyp, GenTree* tree)
: GenTreeUnOp(GT_RUNTIMELOOKUP, tree->gtType, tree DEBUGARG(/*largeNode*/ FALSE)), gtHnd(hnd), gtHndType(hndTyp)
{
assert(hnd != nullptr);
}
#if DEBUGGABLE_GENTREE
GenTreeRuntimeLookup() : GenTreeUnOp()
{
}
#endif
// Return reference to the actual tree that does the lookup
GenTree*& Lookup()
{
return gtOp1;
}
bool IsClassHandle() const
{
return gtHndType == CORINFO_HANDLETYPE_CLASS;
}
bool IsMethodHandle() const
{
return gtHndType == CORINFO_HANDLETYPE_METHOD;
}
bool IsFieldHandle() const
{
return gtHndType == CORINFO_HANDLETYPE_FIELD;
}
// Note these operations describe the handle that is input to the
// lookup, not the handle produced by the lookup.
CORINFO_CLASS_HANDLE GetClassHandle() const
{
assert(IsClassHandle());
return (CORINFO_CLASS_HANDLE)gtHnd;
}
CORINFO_METHOD_HANDLE GetMethodHandle() const
{
assert(IsMethodHandle());
return (CORINFO_METHOD_HANDLE)gtHnd;
}
CORINFO_FIELD_HANDLE GetFieldHandle() const
{
assert(IsMethodHandle());
return (CORINFO_FIELD_HANDLE)gtHnd;
}
};
// Represents the condition of a GT_JCC or GT_SETCC node.
struct GenCondition
{
// clang-format off
enum Code : unsigned char
{
OperMask = 7,
Unsigned = 8,
Unordered = Unsigned,
Float = 16,
// 0 would be the encoding of "signed EQ" but since equality is sign insensitive
// we'll use 0 as invalid/uninitialized condition code. This will also leave 1
// as a spare code.
NONE = 0,
SLT = 2,
SLE = 3,
SGE = 4,
SGT = 5,
S = 6,
NS = 7,
EQ = Unsigned | 0, // = 8
NE = Unsigned | 1, // = 9
ULT = Unsigned | SLT, // = 10
ULE = Unsigned | SLE, // = 11
UGE = Unsigned | SGE, // = 12
UGT = Unsigned | SGT, // = 13
C = Unsigned | S, // = 14
NC = Unsigned | NS, // = 15
FEQ = Float | 0, // = 16
FNE = Float | 1, // = 17
FLT = Float | SLT, // = 18
FLE = Float | SLE, // = 19
FGE = Float | SGE, // = 20
FGT = Float | SGT, // = 21
O = Float | S, // = 22
NO = Float | NS, // = 23
FEQU = Unordered | FEQ, // = 24
FNEU = Unordered | FNE, // = 25
FLTU = Unordered | FLT, // = 26
FLEU = Unordered | FLE, // = 27
FGEU = Unordered | FGE, // = 28
FGTU = Unordered | FGT, // = 29
P = Unordered | O, // = 30
NP = Unordered | NO, // = 31
};
// clang-format on
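// Decoding sketch (illustrative, not from the original source): ULT is Unsigned | SLT
// = 8 | 2 = 10 and FGEU is Unordered | Float | SGE = 8 | 16 | 4 = 28, so the low OperMask
// bits select the relation while the Unsigned/Float/Unordered bits select the comparison
// domain.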
private:
Code m_code;
public:
Code GetCode() const
{
return m_code;
}
bool IsFlag() const
{
return (m_code & OperMask) >= S;
}
bool IsUnsigned() const
{
return (ULT <= m_code) && (m_code <= UGT);
}
bool IsFloat() const
{
return !IsFlag() && (m_code & Float) != 0;
}
bool IsUnordered() const
{
return !IsFlag() && (m_code & (Float | Unordered)) == (Float | Unordered);
}
bool Is(Code cond) const
{
return m_code == cond;
}
template <typename... TRest>
bool Is(Code c, TRest... rest) const
{
return Is(c) || Is(rest...);
}
// Indicate whether the condition should be swapped in order to avoid generating
// multiple branches. This happens for certain floating point conditions on XARCH,
// see GenConditionDesc and its associated mapping table for more details.
bool PreferSwap() const
{
#ifdef TARGET_XARCH
return Is(GenCondition::FLT, GenCondition::FLE, GenCondition::FGTU, GenCondition::FGEU);
#else
return false;
#endif
}
const char* Name() const
{
// clang-format off
static const char* names[]
{
"NONE", "???", "SLT", "SLE", "SGE", "SGT", "S", "NS",
"UEQ", "UNE", "ULT", "ULE", "UGE", "UGT", "C", "NC",
"FEQ", "FNE", "FLT", "FLE", "FGE", "FGT", "O", "NO",
"FEQU", "FNEU", "FLTU", "FLEU", "FGEU", "FGTU", "P", "NP"
};
// clang-format on
assert(m_code < ArrLen(names));
return names[m_code];
}
GenCondition() : m_code()
{
}
GenCondition(Code cond) : m_code(cond)
{
}
static_assert((GT_NE - GT_EQ) == (NE & ~Unsigned), "bad relop");
static_assert((GT_LT - GT_EQ) == SLT, "bad relop");
static_assert((GT_LE - GT_EQ) == SLE, "bad relop");
static_assert((GT_GE - GT_EQ) == SGE, "bad relop");
static_assert((GT_GT - GT_EQ) == SGT, "bad relop");
static_assert((GT_TEST_NE - GT_TEST_EQ) == (NE & ~Unsigned), "bad relop");
static GenCondition FromRelop(GenTree* relop)
{
assert(relop->OperIsCompare());
if (varTypeIsFloating(relop->gtGetOp1()))
{
return FromFloatRelop(relop);
}
else
{
return FromIntegralRelop(relop);
}
}
static GenCondition FromFloatRelop(GenTree* relop)
{
assert(varTypeIsFloating(relop->gtGetOp1()) && varTypeIsFloating(relop->gtGetOp2()));
return FromFloatRelop(relop->OperGet(), (relop->gtFlags & GTF_RELOP_NAN_UN) != 0);
}
static GenCondition FromFloatRelop(genTreeOps oper, bool isUnordered)
{
assert(GenTree::OperIsCompare(oper));
unsigned code = oper - GT_EQ;
assert(code <= SGT);
code |= Float;
if (isUnordered)
{
code |= Unordered;
}
return GenCondition(static_cast<Code>(code));
}
static GenCondition FromIntegralRelop(GenTree* relop)
{
assert(!varTypeIsFloating(relop->gtGetOp1()) && !varTypeIsFloating(relop->gtGetOp2()));
return FromIntegralRelop(relop->OperGet(), relop->IsUnsigned());
}
static GenCondition FromIntegralRelop(genTreeOps oper, bool isUnsigned)
{
assert(GenTree::OperIsCompare(oper));
// GT_TEST_EQ/NE are special, they need to be mapped as GT_EQ/NE
unsigned code = oper - ((oper >= GT_TEST_EQ) ? GT_TEST_EQ : GT_EQ);
if (isUnsigned || (code <= 1)) // EQ/NE are treated as unsigned
{
code |= Unsigned;
}
return GenCondition(static_cast<Code>(code));
}
static GenCondition Reverse(GenCondition condition)
{
// clang-format off
static const Code reverse[]
{
// EQ NE LT LE GE GT F NF
NONE, NONE, SGE, SGT, SLT, SLE, NS, S,
NE, EQ, UGE, UGT, ULT, ULE, NC, C,
FNEU, FEQU, FGEU, FGTU, FLTU, FLEU, NO, O,
FNE, FEQ, FGE, FGT, FLT, FLE, NP, P
};
// clang-format on
assert(condition.m_code < ArrLen(reverse));
return GenCondition(reverse[condition.m_code]);
}
static GenCondition Swap(GenCondition condition)
{
// clang-format off
static const Code swap[]
{
// EQ NE LT LE GE GT F NF
NONE, NONE, SGT, SGE, SLE, SLT, S, NS,
EQ, NE, UGT, UGE, ULE, ULT, C, NC,
FEQ, FNE, FGT, FGE, FLE, FLT, O, NO,
FEQU, FNEU, FGTU, FGEU, FLEU, FLTU, P, NP
};
// clang-format on
assert(condition.m_code < ArrLen(swap));
return GenCondition(swap[condition.m_code]);
}
};
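// Illustrative example: the encoding above allows relops to be translated directly into
// condition codes, e.g.:
//
//   GenCondition c1 = GenCondition::FromIntegralRelop(GT_LT, /* isUnsigned */ true);  // c1.Is(GenCondition::ULT)
//   GenCondition c2 = GenCondition::FromFloatRelop(GT_GT, /* isUnordered */ true);    // c2.Is(GenCondition::FGTU)
//   GenCondition r  = GenCondition::Reverse(c2);                                      // r.Is(GenCondition::FLE)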
// Represents a GT_JCC or GT_SETCC node.
struct GenTreeCC final : public GenTree
{
GenCondition gtCondition;
GenTreeCC(genTreeOps oper, GenCondition condition, var_types type = TYP_VOID)
: GenTree(oper, type DEBUGARG(/*largeNode*/ FALSE)), gtCondition(condition)
{
assert(OperIs(GT_JCC, GT_SETCC));
}
#if DEBUGGABLE_GENTREE
GenTreeCC() : GenTree()
{
}
#endif // DEBUGGABLE_GENTREE
};
//------------------------------------------------------------------------
// Deferred inline functions of GenTree -- these need the subtypes above to
// be defined already.
//------------------------------------------------------------------------
inline bool GenTree::OperIsBlkOp()
{
return ((gtOper == GT_ASG) && varTypeIsStruct(AsOp()->gtOp1)) || OperIsStoreBlk();
}
inline bool GenTree::OperIsInitBlkOp()
{
if (!OperIsBlkOp())
{
return false;
}
GenTree* src;
if (gtOper == GT_ASG)
{
src = gtGetOp2();
}
else
{
src = AsBlk()->Data()->gtSkipReloadOrCopy();
}
return src->OperIsInitVal() || src->OperIsConst();
}
inline bool GenTree::OperIsCopyBlkOp()
{
return OperIsBlkOp() && !OperIsInitBlkOp();
}
//------------------------------------------------------------------------
// IsFPZero: Checks whether this is a floating point constant with value 0.0
//
// Return Value:
// Returns true iff the tree is an GT_CNS_DBL, with value of 0.0.
inline bool GenTree::IsFPZero() const
{
if ((gtOper == GT_CNS_DBL) && (AsDblCon()->gtDconVal == 0.0))
{
return true;
}
return false;
}
//------------------------------------------------------------------------
// IsIntegralConst: Checks whether this is a constant node with the given value
//
// Arguments:
// constVal - the value of interest
//
// Return Value:
// Returns true iff the tree is an integral constant opcode, with
// the given value.
//
// Notes:
// Like gtIconVal, the argument is of ssize_t, so cannot check for
// long constants in a target-independent way.
inline bool GenTree::IsIntegralConst(ssize_t constVal) const
{
if ((gtOper == GT_CNS_INT) && (AsIntConCommon()->IconValue() == constVal))
{
return true;
}
if ((gtOper == GT_CNS_LNG) && (AsIntConCommon()->LngValue() == constVal))
{
return true;
}
return false;
}
//-------------------------------------------------------------------
// IsIntegralConstVector: returns true if this is a SIMD vector
// with all its elements equal to an integral constant.
//
// Arguments:
// constVal - const value of vector element
//
// Returns:
// True if this represents an integral const SIMD vector.
//
inline bool GenTree::IsIntegralConstVector(ssize_t constVal) const
{
#ifdef FEATURE_SIMD
// SIMDIntrinsicInit intrinsic with a const value as initializer
// represents a const vector.
if ((gtOper == GT_SIMD) && (AsSIMD()->GetSIMDIntrinsicId() == SIMDIntrinsicInit) &&
AsSIMD()->Op(1)->IsIntegralConst(constVal))
{
assert(varTypeIsIntegral(AsSIMD()->GetSimdBaseType()));
assert(AsSIMD()->GetOperandCount() == 1);
return true;
}
#endif // FEATURE_SIMD
#ifdef FEATURE_HW_INTRINSICS
if (gtOper == GT_HWINTRINSIC)
{
const GenTreeHWIntrinsic* node = AsHWIntrinsic();
if (!varTypeIsIntegral(node->GetSimdBaseType()))
{
// Can't be an integral constant
return false;
}
NamedIntrinsic intrinsicId = node->GetHWIntrinsicId();
if ((node->GetOperandCount() == 0) && (constVal == 0))
{
#if defined(TARGET_XARCH)
return (intrinsicId == NI_Vector128_get_Zero) || (intrinsicId == NI_Vector256_get_Zero);
#elif defined(TARGET_ARM64)
return (intrinsicId == NI_Vector64_get_Zero) || (intrinsicId == NI_Vector128_get_Zero);
#endif // !TARGET_XARCH && !TARGET_ARM64
}
else if ((node->GetOperandCount() == 1) && node->Op(1)->IsIntegralConst(constVal))
{
#if defined(TARGET_XARCH)
return (intrinsicId == NI_Vector128_Create) || (intrinsicId == NI_Vector256_Create);
#elif defined(TARGET_ARM64)
return (intrinsicId == NI_Vector64_Create) || (intrinsicId == NI_Vector128_Create);
#endif // !TARGET_XARCH && !TARGET_ARM64
}
}
#endif // FEATURE_HW_INTRINSICS
return false;
}
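// Illustrative node shapes accepted by the check above (assuming an integral SIMD base type):
//   GT_HWINTRINSIC NI_Vector128_get_Zero (no operands)      -> IsIntegralConstVector(0) is true
//   GT_HWINTRINSIC NI_Vector128_Create(GT_CNS_INT 5)        -> IsIntegralConstVector(5) is true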
//-------------------------------------------------------------------
// IsSIMDZero: returns true if this is a SIMD vector
// with all its elements equal to zero.
//
// Returns:
// True if this represents a SIMD vector with all its elements equal to zero.
//
inline bool GenTree::IsSIMDZero() const
{
#ifdef FEATURE_SIMD
if ((gtOper == GT_SIMD) && (AsSIMD()->GetSIMDIntrinsicId() == SIMDIntrinsicInit))
{
return (AsSIMD()->Op(1)->IsIntegralConst(0) || AsSIMD()->Op(1)->IsFPZero());
}
#endif
return false;
}
//-------------------------------------------------------------------
// IsFloatPositiveZero: returns true if this is exactly a const float value of positive zero (+0.0)
//
// Returns:
// True if this represents a const floating-point value of exactly positive zero (+0.0).
// Will return false if the value is negative zero (-0.0).
//
inline bool GenTree::IsFloatPositiveZero() const
{
if (IsCnsFltOrDbl())
{
// This implementation is almost identical to IsCnsNonZeroFltOrDbl,
// but it is easier to read as a separate check
// rather than as !IsCnsNonZeroFltOrDbl.
double constValue = AsDblCon()->gtDconVal;
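// A bitwise compare against zero: +0.0 is the all-zero bit pattern, while -0.0 sets only the
// sign bit, so negative zero (and every non-zero value) is rejected here.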
return *(__int64*)&constValue == 0;
}
return false;
}
//-------------------------------------------------------------------
// IsVectorZero: returns true if this node is a HWIntrinsic that is Vector*_get_Zero.
//
// Returns:
// True if this represents a HWIntrinsic node that is Vector*_get_Zero.
//
// TODO: We already have IsSIMDZero() and IsIntegralConstVector(0),
// however, IsSIMDZero() does not cover hardware intrinsics, and IsIntegralConstVector(0) does not cover floating
// point. In order to not risk adverse behaviour by modifying those, this function 'IsVectorZero' was introduced.
// At some point, it makes sense to normalize this logic to be a single function call rather than have several
// separate ones; preferably this one.
inline bool GenTree::IsVectorZero() const
{
#ifdef FEATURE_HW_INTRINSICS
if (gtOper == GT_HWINTRINSIC)
{
const GenTreeHWIntrinsic* node = AsHWIntrinsic();
const NamedIntrinsic intrinsicId = node->GetHWIntrinsicId();
#if defined(TARGET_XARCH)
return (intrinsicId == NI_Vector128_get_Zero) || (intrinsicId == NI_Vector256_get_Zero);
#elif defined(TARGET_ARM64)
return (intrinsicId == NI_Vector64_get_Zero) || (intrinsicId == NI_Vector128_get_Zero);
#endif // !TARGET_XARCH && !TARGET_ARM64
}
#endif // FEATURE_HW_INTRINSICS
return false;
}
inline bool GenTree::IsBoxedValue()
{
assert(gtOper != GT_BOX || AsBox()->BoxOp() != nullptr);
return (gtOper == GT_BOX) && (gtFlags & GTF_BOX_VALUE);
}
#ifdef DEBUG
//------------------------------------------------------------------------
// IsValidCallArgument: Given an GenTree node that represents an argument
// enforce (or don't enforce) the following invariant.
//
// Arguments:
// instance method for a GenTree node
//
// Return values:
// true: the GenTree node is accepted as a valid argument
// false: the GenTree node is not accepted as a valid argument
//
// Notes:
// For targets that don't support arguments as a list of fields, we do not support GT_FIELD_LIST.
//
// Currently for AMD64 UNIX we allow a limited case where a GT_FIELD_LIST is
// allowed but every element must be a GT_LCL_FLD.
//
// For the future targets that allow for Multireg args (and this includes the current ARM64 target),
// or that allow for passing promoted structs, we allow a GT_FIELD_LIST of arbitrary nodes.
// These would typically start out as GT_LCL_VARs or GT_LCL_FLDS or GT_INDs,
// but could be changed into constants or GT_COMMA trees by the later
// optimization phases.
inline bool GenTree::IsValidCallArgument()
{
if (OperIs(GT_FIELD_LIST))
{
#if !FEATURE_MULTIREG_ARGS && !FEATURE_PUT_STRUCT_ARG_STK
return false;
#else // FEATURE_MULTIREG_ARGS or FEATURE_PUT_STRUCT_ARG_STK
// We allow this GT_FIELD_LIST as an argument
return true;
#endif // FEATURE_MULTIREG_ARGS or FEATURE_PUT_STRUCT_ARG_STK
}
// We don't have either kind of list, so it satisfies the invariant.
return true;
}
#endif // DEBUG
inline GenTree* GenTree::gtGetOp1() const
{
return AsOp()->gtOp1;
}
#ifdef DEBUG
/* static */ inline bool GenTree::RequiresNonNullOp2(genTreeOps oper)
{
switch (oper)
{
case GT_ADD:
case GT_SUB:
case GT_MUL:
case GT_DIV:
case GT_MOD:
case GT_UDIV:
case GT_UMOD:
case GT_OR:
case GT_XOR:
case GT_AND:
case GT_LSH:
case GT_RSH:
case GT_RSZ:
case GT_ROL:
case GT_ROR:
case GT_INDEX:
case GT_ASG:
case GT_EQ:
case GT_NE:
case GT_LT:
case GT_LE:
case GT_GE:
case GT_GT:
case GT_COMMA:
case GT_QMARK:
case GT_COLON:
case GT_MKREFANY:
return true;
default:
return false;
}
}
#endif // DEBUG
inline GenTree* GenTree::gtGetOp2() const
{
assert(OperIsBinary());
GenTree* op2 = AsOp()->gtOp2;
// Only allow null op2 if the node type allows it, e.g. GT_LEA.
assert((op2 != nullptr) || !RequiresNonNullOp2(gtOper));
return op2;
}
inline GenTree* GenTree::gtGetOp2IfPresent() const
{
/* AsOp()->gtOp2 is only valid for GTK_BINOP nodes. */
GenTree* op2 = OperIsBinary() ? AsOp()->gtOp2 : nullptr;
// This documents the genTreeOps for which AsOp()->gtOp2 cannot be nullptr.
// This helps prefix in its analysis of code which calls gtGetOp2()
assert((op2 != nullptr) || !RequiresNonNullOp2(gtOper));
return op2;
}
inline GenTree* GenTree::gtEffectiveVal(bool commaOnly /* = false */)
{
GenTree* effectiveVal = this;
for (;;)
{
assert(!effectiveVal->OperIs(GT_PUTARG_TYPE));
if (effectiveVal->gtOper == GT_COMMA)
{
effectiveVal = effectiveVal->AsOp()->gtGetOp2();
}
else if (!commaOnly && (effectiveVal->gtOper == GT_NOP) && (effectiveVal->AsOp()->gtOp1 != nullptr))
{
effectiveVal = effectiveVal->AsOp()->gtOp1;
}
else
{
return effectiveVal;
}
}
}
//-------------------------------------------------------------------------
// gtCommaAssignVal - find the value being assigned in a comma-wrapped assignment
//
// Returns:
// tree representing value being assigned if this tree represents a
// comma-wrapped local definition and use.
//
// the original tree, if not.
//
inline GenTree* GenTree::gtCommaAssignVal()
{
GenTree* result = this;
if (OperIs(GT_COMMA))
{
GenTree* commaOp1 = AsOp()->gtOp1;
GenTree* commaOp2 = AsOp()->gtOp2;
if (commaOp2->OperIs(GT_LCL_VAR) && commaOp1->OperIs(GT_ASG))
{
GenTree* asgOp1 = commaOp1->AsOp()->gtOp1;
GenTree* asgOp2 = commaOp1->AsOp()->gtOp2;
if (asgOp1->OperIs(GT_LCL_VAR) && (asgOp1->AsLclVar()->GetLclNum() == commaOp2->AsLclVar()->GetLclNum()))
{
result = asgOp2;
}
}
}
return result;
}
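// Illustrative example: for a tree of the form COMMA(ASG(V01, <expr>), V01), gtCommaAssignVal
// returns <expr>; for any other shape the original tree is returned unchanged.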
//-------------------------------------------------------------------------
// gtSkipPutArgType - skip PUTARG_TYPE if it is present.
//
// Returns:
// the original tree or its child if it was a PUTARG_TYPE.
//
// Notes:
// PUTARG_TYPE should be skipped when we are doing transformations
// that are not affected by ABI, for example: inlining, implicit byref morphing.
//
inline GenTree* GenTree::gtSkipPutArgType()
{
if (OperIs(GT_PUTARG_TYPE))
{
GenTree* res = AsUnOp()->gtGetOp1();
assert(!res->OperIs(GT_PUTARG_TYPE));
return res;
}
return this;
}
inline GenTree* GenTree::gtSkipReloadOrCopy()
{
// There can be only one reload or copy (we can't have a reload/copy of a reload/copy)
if (gtOper == GT_RELOAD || gtOper == GT_COPY)
{
assert(gtGetOp1()->OperGet() != GT_RELOAD && gtGetOp1()->OperGet() != GT_COPY);
return gtGetOp1();
}
return this;
}
//-----------------------------------------------------------------------------------
// IsMultiRegCall: whether a call node returns its value in more than one register
//
// Arguments:
// None
//
// Return Value:
// Returns true if this GenTree is a multi register returning call
//
inline bool GenTree::IsMultiRegCall() const
{
if (this->IsCall())
{
return AsCall()->HasMultiRegRetVal();
}
return false;
}
//-----------------------------------------------------------------------------------
// IsMultiRegLclVar: whether a local var node defines multiple registers
//
// Arguments:
// None
//
// Return Value:
// Returns true if this GenTree is a multi register defining local var
//
inline bool GenTree::IsMultiRegLclVar() const
{
if (OperIsScalarLocal())
{
return AsLclVar()->IsMultiReg();
}
return false;
}
//-----------------------------------------------------------------------------------
// GetRegByIndex: Get a specific register, based on regIndex, that is produced by this node.
//
// Arguments:
// regIndex - which register to return (must be 0 for non-multireg nodes)
//
// Return Value:
// The register, if any, assigned to this index for this node.
//
// Notes:
// All targets that support multi-reg ops of any kind also support multi-reg return
// values for calls. Should that change with a future target, this method will need
// to change accordingly.
//
inline regNumber GenTree::GetRegByIndex(int regIndex) const
{
if (regIndex == 0)
{
return GetRegNum();
}
#if FEATURE_MULTIREG_RET
if (IsMultiRegCall())
{
return AsCall()->GetRegNumByIdx(regIndex);
}
#if FEATURE_ARG_SPLIT
if (OperIsPutArgSplit())
{
return AsPutArgSplit()->GetRegNumByIdx(regIndex);
}
#endif
#if !defined(TARGET_64BIT)
if (OperIsMultiRegOp())
{
return AsMultiRegOp()->GetRegNumByIdx(regIndex);
}
#endif
if (OperIs(GT_COPY, GT_RELOAD))
{
return AsCopyOrReload()->GetRegNumByIdx(regIndex);
}
#endif // FEATURE_MULTIREG_RET
#ifdef FEATURE_HW_INTRINSICS
if (OperIs(GT_HWINTRINSIC))
{
assert(regIndex == 1);
// TODO-ARM64-NYI: Support hardware intrinsics operating on multiple contiguous registers.
return AsHWIntrinsic()->GetOtherReg();
}
#endif // FEATURE_HW_INTRINSICS
if (OperIsScalarLocal())
{
return AsLclVar()->GetRegNumByIdx(regIndex);
}
assert(!"Invalid regIndex for GetRegFromMultiRegNode");
return REG_NA;
}
//-----------------------------------------------------------------------------------
// GetRegTypeByIndex: Get a specific register's type, based on regIndex, that is produced
// by this multi-reg node.
//
// Arguments:
// regIndex - index of register whose type will be returned
//
// Return Value:
// The register type assigned to this index for this node.
//
// Notes:
// This must be a multireg node that is *not* a copy or reload (which must retrieve the
// type from its source), and 'regIndex' must be a valid index for this node.
//
// All targets that support multi-reg ops of any kind also support multi-reg return
// values for calls. Should that change with a future target, this method will need
// to change accordingly.
//
inline var_types GenTree::GetRegTypeByIndex(int regIndex) const
{
#if FEATURE_MULTIREG_RET
if (IsMultiRegCall())
{
return AsCall()->GetReturnTypeDesc()->GetReturnRegType(regIndex);
}
#if FEATURE_ARG_SPLIT
if (OperIsPutArgSplit())
{
return AsPutArgSplit()->GetRegType(regIndex);
}
#endif // FEATURE_ARG_SPLIT
#if !defined(TARGET_64BIT)
if (OperIsMultiRegOp())
{
return AsMultiRegOp()->GetRegType(regIndex);
}
#endif // !defined(TARGET_64BIT)
#endif // FEATURE_MULTIREG_RET
if (OperIsHWIntrinsic())
{
assert(TypeGet() == TYP_STRUCT);
#ifdef TARGET_ARM64
if (AsHWIntrinsic()->GetSimdSize() == 16)
{
return TYP_SIMD16;
}
else
{
assert(AsHWIntrinsic()->GetSimdSize() == 8);
return TYP_SIMD8;
}
#elif defined(TARGET_XARCH)
// At this time, the only multi-reg HW intrinsics all return the type of their
// arguments. If this changes, we will need a way to record or determine this.
return gtGetOp1()->TypeGet();
#endif
}
if (OperIsScalarLocal())
{
if (TypeGet() == TYP_LONG)
{
return TYP_INT;
}
assert(TypeGet() == TYP_STRUCT);
assert((gtFlags & GTF_VAR_MULTIREG) != 0);
// The register type for a multireg lclVar requires looking at the LclVarDsc,
// which requires a Compiler instance. The caller must use the GetFieldTypeByIndex
// on GenTreeLclVar.
assert(!"GetRegTypeByIndex for LclVar");
}
assert(!"Invalid node type for GetRegTypeByIndex");
return TYP_UNDEF;
}
//-----------------------------------------------------------------------------------
// GetRegSpillFlagByIdx: Get a specific register's spill flags, based on regIndex,
// for this multi-reg node.
//
// Arguments:
// regIndex - which register's spill flags to return
//
// Return Value:
// The spill flags (GTF_SPILL GTF_SPILLED) for this register.
//
// Notes:
// This must be a multireg node and 'regIndex' must be a valid index for this node.
// This method returns the GTF "equivalent" flags based on the packed flags on the multireg node.
//
inline GenTreeFlags GenTree::GetRegSpillFlagByIdx(int regIndex) const
{
#if FEATURE_MULTIREG_RET
if (IsMultiRegCall())
{
return AsCall()->GetRegSpillFlagByIdx(regIndex);
}
#if FEATURE_ARG_SPLIT
if (OperIsPutArgSplit())
{
return AsPutArgSplit()->GetRegSpillFlagByIdx(regIndex);
}
#endif // FEATURE_ARG_SPLIT
#if !defined(TARGET_64BIT)
if (OperIsMultiRegOp())
{
return AsMultiRegOp()->GetRegSpillFlagByIdx(regIndex);
}
#endif // !defined(TARGET_64BIT)
#endif // FEATURE_MULTIREG_RET
if (OperIsScalarLocal())
{
return AsLclVar()->GetRegSpillFlagByIdx(regIndex);
}
assert(!"Invalid node type for GetRegSpillFlagByIdx");
return GTF_EMPTY;
}
//-----------------------------------------------------------------------------------
// GetLastUseBit: Get the last use bit for regIndex
//
// Arguments:
// regIndex - the register index
//
// Return Value:
// The bit to set, clear or query for the last-use of the regIndex'th value.
//
// Notes:
// This must be a GenTreeLclVar or GenTreeCopyOrReload node.
//
inline GenTreeFlags GenTree::GetLastUseBit(int regIndex) const
{
assert(regIndex < 4);
assert(OperIs(GT_LCL_VAR, GT_STORE_LCL_VAR, GT_COPY, GT_RELOAD));
static_assert_no_msg((1 << MULTIREG_LAST_USE_SHIFT) == GTF_VAR_MULTIREG_DEATH0);
return (GenTreeFlags)(1 << (MULTIREG_LAST_USE_SHIFT + regIndex));
}
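// For example, GetLastUseBit(0) yields GTF_VAR_MULTIREG_DEATH0 and GetLastUseBit(2) yields
// GTF_VAR_MULTIREG_DEATH2, since the four death bits occupy consecutive flag positions
// starting at MULTIREG_LAST_USE_SHIFT.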
//-----------------------------------------------------------------------------------
// IsLastUse: Determine whether this node is a last use of the regIndex'th value
//
// Arguments:
// regIndex - the register index
//
// Return Value:
// true iff this is a last use.
//
// Notes:
// This must be a GenTreeLclVar or GenTreeCopyOrReload node.
//
inline bool GenTree::IsLastUse(int regIndex) const
{
assert(OperIs(GT_LCL_VAR, GT_STORE_LCL_VAR, GT_COPY, GT_RELOAD));
return (gtFlags & GetLastUseBit(regIndex)) != 0;
}
//-----------------------------------------------------------------------------------
// HasLastUse: Determine whether this node is a last use of any value
//
// Return Value:
// true iff this has any last uses (i.e. at any index).
//
// Notes:
// This must be a GenTreeLclVar or GenTreeCopyOrReload node.
//
inline bool GenTree::HasLastUse() const
{
return (gtFlags & (GTF_VAR_DEATH_MASK)) != 0;
}
//-----------------------------------------------------------------------------------
// SetLastUse: Set the last use bit for the given index
//
// Arguments:
// regIndex - the register index
//
// Notes:
// This must be a GenTreeLclVar or GenTreeCopyOrReload node.
//
inline void GenTree::SetLastUse(int regIndex)
{
gtFlags |= GetLastUseBit(regIndex);
}
//-----------------------------------------------------------------------------------
// ClearLastUse: Clear the last use bit for the given index
//
// Arguments:
// regIndex - the register index
//
// Notes:
// This must be a GenTreeLclVar or GenTreeCopyOrReload node.
//
inline void GenTree::ClearLastUse(int regIndex)
{
gtFlags &= ~GetLastUseBit(regIndex);
}
//-------------------------------------------------------------------------
// IsCopyOrReload: whether this is a GT_COPY or GT_RELOAD node.
//
// Arguments:
// None
//
// Return Value:
// Returns true if this GenTree is a copy or reload node.
//
inline bool GenTree::IsCopyOrReload() const
{
return (gtOper == GT_COPY || gtOper == GT_RELOAD);
}
//-----------------------------------------------------------------------------------
// IsCopyOrReloadOfMultiRegCall: whether this is a GT_COPY or GT_RELOAD of a multi-reg
// call node.
//
// Arguments:
// None
//
// Return Value:
// Returns true if this GenTree is a copy or reload of multi-reg call node.
//
inline bool GenTree::IsCopyOrReloadOfMultiRegCall() const
{
if (IsCopyOrReload())
{
return gtGetOp1()->IsMultiRegCall();
}
return false;
}
inline bool GenTree::IsCnsIntOrI() const
{
return (gtOper == GT_CNS_INT);
}
inline bool GenTree::IsIntegralConst() const
{
#ifdef TARGET_64BIT
return IsCnsIntOrI();
#else // !TARGET_64BIT
return ((gtOper == GT_CNS_INT) || (gtOper == GT_CNS_LNG));
#endif // !TARGET_64BIT
}
// Is this node an integer constant that fits in a 32-bit signed integer (INT32)
inline bool GenTree::IsIntCnsFitsInI32()
{
#ifdef TARGET_64BIT
return IsCnsIntOrI() && AsIntCon()->FitsInI32();
#else // !TARGET_64BIT
return IsCnsIntOrI();
#endif // !TARGET_64BIT
}
inline bool GenTree::IsCnsFltOrDbl() const
{
return OperGet() == GT_CNS_DBL;
}
inline bool GenTree::IsCnsNonZeroFltOrDbl() const
{
if (OperGet() == GT_CNS_DBL)
{
double constValue = AsDblCon()->gtDconVal;
return *(__int64*)&constValue != 0;
}
return false;
}
inline bool GenTree::IsHelperCall()
{
return OperGet() == GT_CALL && AsCall()->gtCallType == CT_HELPER;
}
inline var_types GenTree::CastFromType()
{
return this->AsCast()->CastOp()->TypeGet();
}
inline var_types& GenTree::CastToType()
{
return this->AsCast()->gtCastType;
}
inline bool GenTree::isUsedFromSpillTemp() const
{
// If spilled and no reg at use, then it is used from the spill temp location rather than being reloaded.
if (((gtFlags & GTF_SPILLED) != 0) && ((gtFlags & GTF_NOREG_AT_USE) != 0))
{
return true;
}
return false;
}
/*****************************************************************************/
#ifndef HOST_64BIT
#include <poppack.h>
#endif
/*****************************************************************************/
const size_t TREE_NODE_SZ_SMALL = sizeof(GenTreeLclFld);
const size_t TREE_NODE_SZ_LARGE = sizeof(GenTreeCall);
enum varRefKinds
{
VR_INVARIANT = 0x00, // an invariant value
VR_NONE = 0x00,
VR_IND_REF = 0x01, // an object reference
VR_IND_SCL = 0x02, // a non-object reference
VR_GLB_VAR = 0x04, // a global (clsVar)
};
/*****************************************************************************/
#endif // !GENTREE_H
/*****************************************************************************/
// Licensed to the .NET Foundation under one or more agreements.
// The .NET Foundation licenses this file to you under the MIT license.
/*XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XX XX
XX GenTree XX
XX XX
XX This is the node in the semantic tree graph. It represents the operation XX
XX corresponding to the node, and other information during code-gen. XX
XX XX
XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
*/
/*****************************************************************************/
#ifndef _GENTREE_H_
#define _GENTREE_H_
/*****************************************************************************/
#include "vartype.h" // For "var_types"
#include "target.h" // For "regNumber"
#include "ssaconfig.h" // For "SsaConfig::RESERVED_SSA_NUM"
#include "valuenumtype.h"
#include "jitstd.h"
#include "jithashtable.h"
#include "simd.h"
#include "namedintrinsiclist.h"
#include "layout.h"
#include "debuginfo.h"
// Debugging GenTree is much easier if we add a magic virtual function to make the debugger able to figure out what type
// it's got. This is enabled by default in DEBUG. To enable it in RET builds (temporarily!), you need to change the
// build to define DEBUGGABLE_GENTREE=1, as well as pass /OPT:NOICF to the linker (or else all the vtables get merged,
// making the debugging value supplied by them useless).
#ifndef DEBUGGABLE_GENTREE
#ifdef DEBUG
#define DEBUGGABLE_GENTREE 1
#else // !DEBUG
#define DEBUGGABLE_GENTREE 0
#endif // !DEBUG
#endif // !DEBUGGABLE_GENTREE
// The SpecialCodeKind enum is used to indicate the type of special (unique)
// target block that will be targeted by an instruction.
// These are used by:
// GenTreeBoundsChk nodes (SCK_RNGCHK_FAIL, SCK_ARG_EXCPN, SCK_ARG_RNG_EXCPN)
// - these nodes have a field (gtThrowKind) to indicate which kind
// GenTreeOps nodes, for which codegen will generate the branch
// - it will use the appropriate kind based on the opcode, though it's not
// clear why SCK_OVERFLOW == SCK_ARITH_EXCPN
//
enum SpecialCodeKind
{
SCK_NONE,
SCK_RNGCHK_FAIL, // target when range check fails
SCK_DIV_BY_ZERO, // target for divide by zero (Not used on X86/X64)
SCK_ARITH_EXCPN, // target on arithmetic exception
SCK_OVERFLOW = SCK_ARITH_EXCPN, // target on overflow
SCK_ARG_EXCPN, // target on ArgumentException (currently used only for SIMD intrinsics)
SCK_ARG_RNG_EXCPN, // target on ArgumentOutOfRangeException (currently used only for SIMD intrinsics)
SCK_COUNT
};
/*****************************************************************************/
enum genTreeOps : BYTE
{
#define GTNODE(en, st, cm, ok) GT_##en,
#include "gtlist.h"
GT_COUNT,
#ifdef TARGET_64BIT
// GT_CNS_NATIVELONG is the gtOper symbol for GT_CNS_LNG or GT_CNS_INT, depending on the target.
// For the 64-bit targets we will only use GT_CNS_INT, as it is used to represent all the possible sizes
GT_CNS_NATIVELONG = GT_CNS_INT,
#else
// For the 32-bit targets we use a GT_CNS_LNG to hold a 64-bit integer constant and GT_CNS_INT for all others.
// In the future when we retarget the JIT for x86 we should consider eliminating GT_CNS_LNG
GT_CNS_NATIVELONG = GT_CNS_LNG,
#endif
};
// The following enum defines a set of bit flags that can be used
// to classify expression tree nodes.
//
enum GenTreeOperKind
{
GTK_SPECIAL = 0x00, // special operator
GTK_LEAF = 0x01, // leaf operator
GTK_UNOP = 0x02, // unary operator
GTK_BINOP = 0x04, // binary operator
GTK_KINDMASK = (GTK_SPECIAL | GTK_LEAF | GTK_UNOP | GTK_BINOP), // operator kind mask
GTK_SMPOP = (GTK_UNOP | GTK_BINOP),
GTK_COMMUTE = 0x08, // commutative operator
GTK_EXOP = 0x10, // Indicates that an oper for a node type that extends GenTreeOp (or GenTreeUnOp)
// by adding non-node fields to unary or binary operator.
GTK_NOVALUE = 0x20, // node does not produce a value
GTK_MASK = 0xFF
};
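// Illustrative example: a commutative binary arithmetic oper would be classified as
// (GTK_BINOP | GTK_COMMUTE), and an oper whose node type extends GenTreeOp with extra
// non-node fields would additionally carry GTK_EXOP.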
// The following enum defines a set of bit flags that describe opers for the purposes
// of DEBUG-only checks. This is separate from the above "GenTreeOperKind"s to avoid
// making the table for those larger in Release builds. However, it resides in the same
// "namespace" and so all values here must be distinct from those in "GenTreeOperKind".
//
enum GenTreeDebugOperKind
{
DBK_FIRST_FLAG = GTK_MASK + 1,
DBK_NOTHIR = DBK_FIRST_FLAG, // This oper is not supported in HIR (before rationalization).
DBK_NOTLIR = DBK_FIRST_FLAG << 1, // This oper is not supported in LIR (after rationalization).
DBK_NOCONTAIN = DBK_FIRST_FLAG << 2, // This oper produces a value, but may not be contained.
DBK_MASK = ~GTK_MASK
};
/*****************************************************************************/
enum gtCallTypes : BYTE
{
CT_USER_FUNC, // User function
CT_HELPER, // Jit-helper
CT_INDIRECT, // Indirect call
CT_COUNT // fake entry (must be last)
};
#ifdef DEBUG
/*****************************************************************************
*
* TargetHandleTypes are used to determine the type of handle present inside GenTreeIntCon node.
* The values are such that they don't overlap with helper's or user function's handle.
*/
enum TargetHandleType : BYTE
{
THT_Unknown = 2,
THT_GSCookieCheck = 4,
THT_SetGSCookie = 6,
THT_IntializeArrayIntrinsics = 8
};
#endif
/*****************************************************************************/
struct BasicBlock;
enum BasicBlockFlags : unsigned __int64;
struct InlineCandidateInfo;
struct GuardedDevirtualizationCandidateInfo;
struct ClassProfileCandidateInfo;
struct LateDevirtualizationInfo;
typedef unsigned short AssertionIndex;
static const AssertionIndex NO_ASSERTION_INDEX = 0;
//------------------------------------------------------------------------
// GetAssertionIndex: return 1-based AssertionIndex from 0-based int index.
//
// Arguments:
// index - 0-based index
// Return Value:
// 1-based AssertionIndex.
inline AssertionIndex GetAssertionIndex(unsigned index)
{
return (AssertionIndex)(index + 1);
}
class AssertionInfo
{
// true if the assertion holds on the bbNext edge instead of the bbJumpDest edge (for GT_JTRUE nodes)
unsigned short m_isNextEdgeAssertion : 1;
// 1-based index of the assertion
unsigned short m_assertionIndex : 15;
AssertionInfo(bool isNextEdgeAssertion, AssertionIndex assertionIndex)
: m_isNextEdgeAssertion(isNextEdgeAssertion), m_assertionIndex(assertionIndex)
{
assert(m_assertionIndex == assertionIndex);
}
public:
AssertionInfo() : AssertionInfo(false, 0)
{
}
AssertionInfo(AssertionIndex assertionIndex) : AssertionInfo(false, assertionIndex)
{
}
static AssertionInfo ForNextEdge(AssertionIndex assertionIndex)
{
// Ignore the edge information if there's no assertion
bool isNextEdge = (assertionIndex != NO_ASSERTION_INDEX);
return AssertionInfo(isNextEdge, assertionIndex);
}
void Clear()
{
m_isNextEdgeAssertion = 0;
m_assertionIndex = NO_ASSERTION_INDEX;
}
bool HasAssertion() const
{
return m_assertionIndex != NO_ASSERTION_INDEX;
}
AssertionIndex GetAssertionIndex() const
{
return m_assertionIndex;
}
bool IsNextEdgeAssertion() const
{
return m_isNextEdgeAssertion;
}
};
/*****************************************************************************/
// GT_FIELD nodes will be lowered into more "code-gen-able" representations, like
// GT_IND's of addresses, or GT_LCL_FLD nodes. We'd like to preserve the more abstract
// information, and will therefore annotate such lowered nodes with FieldSeq's. A FieldSeq
// represents a (possibly) empty sequence of fields. The fields are in the order
// in which they are dereferenced. The first field may be an object field or a struct field;
// all subsequent fields must be struct fields.
struct FieldSeqNode
{
CORINFO_FIELD_HANDLE m_fieldHnd;
FieldSeqNode* m_next;
FieldSeqNode(CORINFO_FIELD_HANDLE fieldHnd, FieldSeqNode* next) : m_fieldHnd(fieldHnd), m_next(next)
{
}
// returns true when this is the pseudo #FirstElem field sequence
bool IsFirstElemFieldSeq();
// returns true when this is the pseudo #ConstantIndex field sequence
bool IsConstantIndexFieldSeq();
// returns true when this is the pseudo #FirstElem field sequence or the pseudo #ConstantIndex field sequence
bool IsPseudoField() const;
CORINFO_FIELD_HANDLE GetFieldHandle() const
{
assert(!IsPseudoField() && (m_fieldHnd != nullptr));
return m_fieldHnd;
}
FieldSeqNode* GetTail()
{
FieldSeqNode* tail = this;
while (tail->m_next != nullptr)
{
tail = tail->m_next;
}
return tail;
}
// Make sure this provides methods that allow it to be used as a KeyFuncs type in SimplerHash.
static int GetHashCode(FieldSeqNode fsn)
{
return static_cast<int>(reinterpret_cast<intptr_t>(fsn.m_fieldHnd)) ^
static_cast<int>(reinterpret_cast<intptr_t>(fsn.m_next));
}
static bool Equals(const FieldSeqNode& fsn1, const FieldSeqNode& fsn2)
{
return fsn1.m_fieldHnd == fsn2.m_fieldHnd && fsn1.m_next == fsn2.m_next;
}
};
// This class canonicalizes field sequences.
class FieldSeqStore
{
typedef JitHashTable<FieldSeqNode, /*KeyFuncs*/ FieldSeqNode, FieldSeqNode*> FieldSeqNodeCanonMap;
CompAllocator m_alloc;
FieldSeqNodeCanonMap* m_canonMap;
static FieldSeqNode s_notAField; // No value, just exists to provide an address.
// Dummy variables to provide the addresses for the "pseudo field handle" statics below.
static int FirstElemPseudoFieldStruct;
static int ConstantIndexPseudoFieldStruct;
public:
FieldSeqStore(CompAllocator alloc);
// Returns the (canonical in the store) singleton field sequence for the given handle.
FieldSeqNode* CreateSingleton(CORINFO_FIELD_HANDLE fieldHnd);
// This is a special distinguished FieldSeqNode indicating that a constant does *not*
// represent a valid field sequence. This is "infectious", in the sense that appending it
// (on either side) to any field sequence yields the "NotAField()" sequence.
static FieldSeqNode* NotAField()
{
return &s_notAField;
}
// Returns the (canonical in the store) field sequence representing the concatenation of
// the sequences represented by "a" and "b". Assumes that "a" and "b" are canonical; that is,
// they are the results of CreateSingleton, NotAField, or Append calls. If either of the arguments
// are the "NotAField" value, so is the result.
FieldSeqNode* Append(FieldSeqNode* a, FieldSeqNode* b);
// We have a few "pseudo" field handles:
// This treats the constant offset of the first element of something as if it were a field.
// Works for method table offsets of boxed structs, or first elem offset of arrays/strings.
static CORINFO_FIELD_HANDLE FirstElemPseudoField;
// If there is a constant index, we make a pseudo field to correspond to the constant added to
// offset of the indexed field. This keeps the field sequence structure "normalized", especially in the
// case where the element type is a struct, so we might add a further struct field offset.
static CORINFO_FIELD_HANDLE ConstantIndexPseudoField;
static bool IsPseudoField(CORINFO_FIELD_HANDLE hnd)
{
return hnd == FirstElemPseudoField || hnd == ConstantIndexPseudoField;
}
};
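// Illustrative sketch (assuming fieldSeqStore points at the compiler's FieldSeqStore and
// hndA/hndB are the relevant CORINFO_FIELD_HANDLEs): a lowered access such as "obj.a.b"
// can be annotated by appending the canonical singleton sequences for the two handles:
//
//   FieldSeqNode* seqA = fieldSeqStore->CreateSingleton(hndA);
//   FieldSeqNode* seqB = fieldSeqStore->CreateSingleton(hndB);
//   FieldSeqNode* seq  = fieldSeqStore->Append(seqA, seqB); // "a" then "b", in dereference order
//
// Appending FieldSeqStore::NotAField() on either side instead yields NotAField().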
class GenTreeUseEdgeIterator;
class GenTreeOperandIterator;
struct Statement;
/*****************************************************************************/
// Forward declarations of the subtypes
#define GTSTRUCT_0(fn, en) struct GenTree##fn;
#define GTSTRUCT_1(fn, en) struct GenTree##fn;
#define GTSTRUCT_2(fn, en, en2) struct GenTree##fn;
#define GTSTRUCT_3(fn, en, en2, en3) struct GenTree##fn;
#define GTSTRUCT_4(fn, en, en2, en3, en4) struct GenTree##fn;
#define GTSTRUCT_N(fn, ...) struct GenTree##fn;
#define GTSTRUCT_2_SPECIAL(fn, en, en2) GTSTRUCT_2(fn, en, en2)
#define GTSTRUCT_3_SPECIAL(fn, en, en2, en3) GTSTRUCT_3(fn, en, en2, en3)
#include "gtstructs.h"
/*****************************************************************************/
// Don't format the GenTreeFlags declaration
// clang-format off
//------------------------------------------------------------------------
// GenTreeFlags: a bitmask of flags for GenTree stored in gtFlags
//
enum GenTreeFlags : unsigned int
{
GTF_EMPTY = 0,
//---------------------------------------------------------------------
// The first set of flags can be used with a large set of nodes, and
// thus they must all have distinct values. That is, one can test any
// expression node for one of these flags.
//---------------------------------------------------------------------
GTF_ASG = 0x00000001, // sub-expression contains an assignment
GTF_CALL = 0x00000002, // sub-expression contains a func. call
GTF_EXCEPT = 0x00000004, // sub-expression might throw an exception
GTF_GLOB_REF = 0x00000008, // sub-expression uses global variable(s)
GTF_ORDER_SIDEEFF = 0x00000010, // sub-expression has a re-ordering side effect
// If you set these flags, make sure that code:gtExtractSideEffList knows how to find the tree,
// otherwise the C# (run csc /o-) code:
// var v = side_eff_operation
// with no use of `v` will drop your tree on the floor.
GTF_PERSISTENT_SIDE_EFFECTS = GTF_ASG | GTF_CALL,
GTF_SIDE_EFFECT = GTF_PERSISTENT_SIDE_EFFECTS | GTF_EXCEPT,
GTF_GLOB_EFFECT = GTF_SIDE_EFFECT | GTF_GLOB_REF,
GTF_ALL_EFFECT = GTF_GLOB_EFFECT | GTF_ORDER_SIDEEFF,
GTF_REVERSE_OPS = 0x00000020, // operand op2 should be evaluated before op1 (normally, op1 is evaluated first and op2 is evaluated second)
GTF_CONTAINED = 0x00000040, // This node is contained (executed as part of its parent)
GTF_SPILLED = 0x00000080, // the value has been spilled
GTF_NOREG_AT_USE = 0x00000100, // tree node is in memory at the point of use
GTF_SET_FLAGS = 0x00000200, // Requires that codegen for this node set the flags. Use gtSetFlags() to check this flag.
GTF_USE_FLAGS = 0x00000400, // Indicates that this node uses the flags bits.
GTF_MAKE_CSE = 0x00000800, // Hoisted expression: try hard to make this into CSE (see optPerformHoistExpr)
GTF_DONT_CSE = 0x00001000, // Don't bother CSE'ing this expr
GTF_COLON_COND = 0x00002000, // This node is conditionally executed (part of ? :)
GTF_NODE_MASK = GTF_COLON_COND,
GTF_BOOLEAN = 0x00004000, // value is known to be 0/1
GTF_UNSIGNED = 0x00008000, // With GT_CAST: the source operand is an unsigned type
// With operators: the specified node is an unsigned operator
GTF_LATE_ARG = 0x00010000, // The specified node is evaluated to a temp in the arg list, and this temp is added to gtCallLateArgs.
GTF_SPILL = 0x00020000, // Needs to be spilled here
// The extra flag GTF_IS_IN_CSE is used to tell the consumer of the side effect flags
// that we are calling in the context of performing a CSE, thus we
// should allow the run-once side effects of running a class constructor.
//
// The only requirement of this flag is that it not overlap any of the
// side-effect flags. The actual bit used is otherwise arbitrary.
GTF_IS_IN_CSE = GTF_BOOLEAN,
GTF_COMMON_MASK = 0x0003FFFF, // mask of all the flags above
GTF_REUSE_REG_VAL = 0x00800000, // This is set by the register allocator on nodes whose value already exists in the
// register assigned to this node, so the code generator does not have to generate
// code to produce the value. It is currently used only on constant nodes.
// It CANNOT be set on var (GT_LCL*) nodes, or on indir (GT_IND or GT_STOREIND) nodes, since
// it is not needed for lclVars and is highly unlikely to be useful for indir nodes.
//---------------------------------------------------------------------
// The following flags can be used only with a small set of nodes, and
// thus their values need not be distinct (other than within the set
// that goes with a particular node/nodes, of course). That is, one can
// only test for one of these flags if the 'gtOper' value is tested as
// well to make sure it's the right operator for the particular flag.
//---------------------------------------------------------------------
// NB: GTF_VAR_* and GTF_REG_* share the same namespace of flags.
// These flags are also used by GT_LCL_FLD, and the last-use (DEATH) flags are also used by GenTreeCopyOrReload.
GTF_VAR_DEF = 0x80000000, // GT_LCL_VAR -- this is a definition
GTF_VAR_USEASG = 0x40000000, // GT_LCL_VAR -- this is a partial definition, a use of the previous definition is implied
// A partial definition usually occurs when a struct field is assigned to (s.f = ...) or
// when a scalar typed variable is assigned to via a narrow store (*((byte*)&i) = ...).
// Last-use bits.
// Note that a node marked GTF_VAR_MULTIREG can only be a pure definition of all the fields, or a pure use of all the fields,
// so we don't need the equivalent of GTF_VAR_USEASG.
GTF_VAR_MULTIREG_DEATH0 = 0x04000000, // GT_LCL_VAR -- The last-use bit for a lclVar (the first register if it is multireg).
GTF_VAR_DEATH = GTF_VAR_MULTIREG_DEATH0,
GTF_VAR_MULTIREG_DEATH1 = 0x08000000, // GT_LCL_VAR -- The last-use bit for the second register of a multireg lclVar.
GTF_VAR_MULTIREG_DEATH2 = 0x10000000, // GT_LCL_VAR -- The last-use bit for the third register of a multireg lclVar.
GTF_VAR_MULTIREG_DEATH3 = 0x20000000, // GT_LCL_VAR -- The last-use bit for the fourth register of a multireg lclVar.
GTF_VAR_DEATH_MASK = GTF_VAR_MULTIREG_DEATH0 | GTF_VAR_MULTIREG_DEATH1 | GTF_VAR_MULTIREG_DEATH2 | GTF_VAR_MULTIREG_DEATH3,
// This is the amount we have to shift, plus the regIndex, to get the last use bit we want.
#define MULTIREG_LAST_USE_SHIFT 26
GTF_VAR_MULTIREG = 0x02000000, // This is a struct or (on 32-bit platforms) long variable that is used or defined
// to/from a multireg source or destination (e.g. a call arg or return, or an op
// that returns its result in multiple registers such as a long multiply).
GTF_LIVENESS_MASK = GTF_VAR_DEF | GTF_VAR_USEASG | GTF_VAR_DEATH_MASK,
GTF_VAR_CAST = 0x01000000, // GT_LCL_VAR -- has been explicitly cast (variable node may not be type of local)
GTF_VAR_ITERATOR = 0x00800000, // GT_LCL_VAR -- this is an iterator reference in the loop condition
GTF_VAR_CLONED = 0x00400000, // GT_LCL_VAR -- this node has been cloned or is a clone
GTF_VAR_CONTEXT = 0x00200000, // GT_LCL_VAR -- this node is part of a runtime lookup
GTF_VAR_FOLDED_IND = 0x00100000, // GT_LCL_VAR -- this node was folded from *(typ*)&lclVar expression tree in fgMorphSmpOp()
// where 'typ' is a small type and 'lclVar' corresponds to a normalized-on-store local variable.
// This flag identifies such nodes in order to make sure that fgDoNormalizeOnStore() is called
// on their parents in post-order morph.
// Relevant for inlining optimizations (see fgInlinePrependStatements)
GTF_VAR_ARR_INDEX = 0x00000020, // The variable is part of (the index portion of) an array index expression.
// Shares a value with GTF_REVERSE_OPS, which is meaningless for local var.
// For additional flags for GT_CALL node see GTF_CALL_M_*
GTF_CALL_UNMANAGED = 0x80000000, // GT_CALL -- direct call to unmanaged code
GTF_CALL_INLINE_CANDIDATE = 0x40000000, // GT_CALL -- this call has been marked as an inline candidate
GTF_CALL_VIRT_KIND_MASK = 0x30000000, // GT_CALL -- mask of the below call kinds
GTF_CALL_NONVIRT = 0x00000000, // GT_CALL -- a non virtual call
GTF_CALL_VIRT_STUB = 0x10000000, // GT_CALL -- a stub-dispatch virtual call
GTF_CALL_VIRT_VTABLE = 0x20000000, // GT_CALL -- a vtable-based virtual call
GTF_CALL_NULLCHECK = 0x08000000, // GT_CALL -- must check instance pointer for null
GTF_CALL_POP_ARGS = 0x04000000, // GT_CALL -- caller pop arguments?
GTF_CALL_HOISTABLE = 0x02000000, // GT_CALL -- call is hoistable
GTF_MEMORYBARRIER_LOAD = 0x40000000, // GT_MEMORYBARRIER -- Load barrier
GTF_FLD_VOLATILE = 0x40000000, // GT_FIELD/GT_CLS_VAR -- same as GTF_IND_VOLATILE
GTF_FLD_INITCLASS = 0x20000000, // GT_FIELD/GT_CLS_VAR -- field access requires preceding class/static init helper
GTF_INX_RNGCHK = 0x80000000, // GT_INDEX/GT_INDEX_ADDR -- the array reference should be range-checked.
GTF_INX_STRING_LAYOUT = 0x40000000, // GT_INDEX -- this uses the special string array layout
GTF_INX_NOFAULT = 0x20000000, // GT_INDEX -- the INDEX does not throw an exception (morph to GTF_IND_NONFAULTING)
GTF_IND_TGT_NOT_HEAP = 0x80000000, // GT_IND -- the target is not on the heap
GTF_IND_VOLATILE = 0x40000000, // GT_IND -- the load or store must use volatile semantics (this is a nop on X86)
GTF_IND_NONFAULTING = 0x20000000, // Operations for which OperIsIndir() is true -- An indir that cannot fault.
// Same as GTF_ARRLEN_NONFAULTING.
GTF_IND_TGTANYWHERE = 0x10000000, // GT_IND -- the target could be anywhere
GTF_IND_TLS_REF = 0x08000000, // GT_IND -- the target is accessed via TLS
GTF_IND_ASG_LHS = 0x04000000, // GT_IND -- this GT_IND node is (the effective val) of the LHS of an
// assignment; don't evaluate it independently.
GTF_IND_REQ_ADDR_IN_REG = GTF_IND_ASG_LHS, // GT_IND -- requires its addr operand to be evaluated
// into a register. This flag is useful in cases where it
// is required to generate register indirect addressing mode.
// One such case is virtual stub calls on xarch. This is only
// valid in the backend, where GTF_IND_ASG_LHS is not necessary
// (all such indirections will be lowered to GT_STOREIND).
GTF_IND_UNALIGNED = 0x02000000, // GT_IND -- the load or store is unaligned (we assume worst case
// alignment of 1 byte)
GTF_IND_INVARIANT = 0x01000000, // GT_IND -- the target is invariant (a prejit indirection)
GTF_IND_ARR_INDEX = 0x00800000, // GT_IND -- the indirection represents an (SZ) array index
GTF_IND_NONNULL = 0x00400000, // GT_IND -- the indirection never returns null (zero)
GTF_IND_FLAGS = GTF_IND_VOLATILE | GTF_IND_TGTANYWHERE | GTF_IND_NONFAULTING | GTF_IND_TLS_REF | \
GTF_IND_UNALIGNED | GTF_IND_INVARIANT | GTF_IND_NONNULL | GTF_IND_ARR_INDEX | GTF_IND_TGT_NOT_HEAP,
GTF_CLS_VAR_VOLATILE = 0x40000000, // GT_FIELD/GT_CLS_VAR -- same as GTF_IND_VOLATILE
GTF_CLS_VAR_INITCLASS = 0x20000000, // GT_FIELD/GT_CLS_VAR -- same as GTF_FLD_INITCLASS
GTF_CLS_VAR_ASG_LHS = 0x04000000, // GT_CLS_VAR -- this GT_CLS_VAR node is (the effective val) of the LHS
// of an assignment; don't evaluate it independently.
GTF_ADDRMODE_NO_CSE = 0x80000000, // GT_ADD/GT_MUL/GT_LSH -- Do not CSE this node only, forms complex
// addressing mode
GTF_MUL_64RSLT = 0x40000000, // GT_MUL -- produce 64-bit result
GTF_RELOP_NAN_UN = 0x80000000, // GT_<relop> -- Is branch taken if ops are NaN?
GTF_RELOP_JMP_USED = 0x40000000, // GT_<relop> -- result of compare used for jump or ?:
GTF_RELOP_ZTT = 0x08000000, // GT_<relop> -- Loop test cloned for converting while-loops into do-while
// with explicit "loop test" in the header block.
GTF_RELOP_SJUMP_OPT = 0x04000000, // GT_<relop> -- Swap signed jl/jge with js/jns during emitter, reuses flags
// from previous instruction.
GTF_JCMP_EQ = 0x80000000, // GTF_JCMP_EQ -- Branch on equal rather than not equal
GTF_JCMP_TST = 0x40000000, // GTF_JCMP_TST -- Use bit test instruction rather than compare against zero instruction
GTF_RET_MERGED = 0x80000000, // GT_RETURN -- This is a return generated during epilog merging.
GTF_QMARK_CAST_INSTOF = 0x80000000, // GT_QMARK -- Is this a top (not nested) level qmark created for
// castclass or instanceof?
GTF_BOX_VALUE = 0x80000000, // GT_BOX -- "box" is on a value type
GTF_ICON_HDL_MASK = 0xFF000000, // Bits used by handle types below
GTF_ICON_SCOPE_HDL = 0x01000000, // GT_CNS_INT -- constant is a scope handle
GTF_ICON_CLASS_HDL = 0x02000000, // GT_CNS_INT -- constant is a class handle
GTF_ICON_METHOD_HDL = 0x03000000, // GT_CNS_INT -- constant is a method handle
GTF_ICON_FIELD_HDL = 0x04000000, // GT_CNS_INT -- constant is a field handle
GTF_ICON_STATIC_HDL = 0x05000000, // GT_CNS_INT -- constant is a handle to static data
GTF_ICON_STR_HDL = 0x06000000, // GT_CNS_INT -- constant is a string handle
GTF_ICON_CONST_PTR = 0x07000000, // GT_CNS_INT -- constant is a pointer to immutable data, (e.g. IAT_PPVALUE)
GTF_ICON_GLOBAL_PTR = 0x08000000, // GT_CNS_INT -- constant is a pointer to mutable data (e.g. from the VM state)
GTF_ICON_VARG_HDL = 0x09000000, // GT_CNS_INT -- constant is a var arg cookie handle
GTF_ICON_PINVKI_HDL = 0x0A000000, // GT_CNS_INT -- constant is a pinvoke calli handle
GTF_ICON_TOKEN_HDL = 0x0B000000, // GT_CNS_INT -- constant is a token handle (other than class, method or field)
GTF_ICON_TLS_HDL = 0x0C000000, // GT_CNS_INT -- constant is a TLS ref with offset
GTF_ICON_FTN_ADDR = 0x0D000000, // GT_CNS_INT -- constant is a function address
GTF_ICON_CIDMID_HDL = 0x0E000000, // GT_CNS_INT -- constant is a class ID or a module ID
GTF_ICON_BBC_PTR = 0x0F000000, // GT_CNS_INT -- constant is a basic block count pointer
GTF_ICON_STATIC_BOX_PTR = 0x10000000, // GT_CNS_INT -- constant is an address of the box for a STATIC_IN_HEAP field
// GTF_ICON_REUSE_REG_VAL = 0x00800000 // GT_CNS_INT -- GTF_REUSE_REG_VAL, defined above
GTF_ICON_FIELD_OFF = 0x00400000, // GT_CNS_INT -- constant is a field offset
GTF_ICON_SIMD_COUNT = 0x00200000, // GT_CNS_INT -- constant is Vector<T>.Count
GTF_ICON_INITCLASS = 0x00100000, // GT_CNS_INT -- Constant is used to access a static that requires preceding
// class/static init helper. In some cases, the constant is
// the address of the static field itself, and in other cases
// there's an extra layer of indirection and it is the address
// of the cell that the runtime will fill in with the address
// of the static field; in both of those cases, the constant
// is what gets flagged.
GTF_BLK_VOLATILE = GTF_IND_VOLATILE, // GT_ASG, GT_STORE_BLK, GT_STORE_OBJ, GT_STORE_DYNBLK -- is a volatile block operation
GTF_BLK_UNALIGNED = GTF_IND_UNALIGNED, // GT_ASG, GT_STORE_BLK, GT_STORE_OBJ, GT_STORE_DYNBLK -- is an unaligned block operation
GTF_OVERFLOW = 0x10000000, // Supported for: GT_ADD, GT_SUB, GT_MUL and GT_CAST.
// Requires an overflow check. Use gtOverflow(Ex)() to check this flag.
GTF_DIV_BY_CNS_OPT = 0x80000000, // GT_DIV -- Uses the division by constant optimization to compute this division
GTF_CHK_INDEX_INBND = 0x80000000, // GT_BOUNDS_CHECK -- have proved this check is always in-bounds
GTF_ARRLEN_ARR_IDX = 0x80000000, // GT_ARR_LENGTH -- Length which feeds into an array index expression
GTF_ARRLEN_NONFAULTING = 0x20000000, // GT_ARR_LENGTH -- An array length operation that cannot fault. Same as GT_IND_NONFAULTING.
GTF_SIMDASHW_OP = 0x80000000, // GT_HWINTRINSIC -- Indicates that the structHandle should be gotten from gtGetStructHandleForSIMD
// rather than from gtGetStructHandleForHWSIMD.
// Flag used by assertion prop to indicate that a type is a TYP_LONG
#ifdef TARGET_64BIT
GTF_ASSERTION_PROP_LONG = 0x00000001,
#endif // TARGET_64BIT
};
inline constexpr GenTreeFlags operator ~(GenTreeFlags a)
{
return (GenTreeFlags)(~(unsigned int)a);
}
inline constexpr GenTreeFlags operator |(GenTreeFlags a, GenTreeFlags b)
{
return (GenTreeFlags)((unsigned int)a | (unsigned int)b);
}
inline constexpr GenTreeFlags operator &(GenTreeFlags a, GenTreeFlags b)
{
return (GenTreeFlags)((unsigned int)a & (unsigned int)b);
}
inline GenTreeFlags& operator |=(GenTreeFlags& a, GenTreeFlags b)
{
return a = (GenTreeFlags)((unsigned int)a | (unsigned int)b);
}
inline GenTreeFlags& operator &=(GenTreeFlags& a, GenTreeFlags b)
{
return a = (GenTreeFlags)((unsigned int)a & (unsigned int)b);
}
inline GenTreeFlags& operator ^=(GenTreeFlags& a, GenTreeFlags b)
{
return a = (GenTreeFlags)((unsigned int)a ^ (unsigned int)b);
}
// Can any side-effects be observed externally, say by a caller method?
// For assignments, only assignments to global memory can be observed
// externally, whereas simple assignments to local variables can not.
//
// Be careful when using this inside a "try" protected region as the
// order of assignments to local variables would need to be preserved
// wrt side effects if the variables are alive on entry to the
// "catch/finally" region. In such cases, even assignments to locals
// will have to be restricted.
#define GTF_GLOBALLY_VISIBLE_SIDE_EFFECTS(flags) \
(((flags) & (GTF_CALL | GTF_EXCEPT)) || (((flags) & (GTF_ASG | GTF_GLOB_REF)) == (GTF_ASG | GTF_GLOB_REF)))
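// For example, a node flagged only with GTF_ASG (an assignment to a local) is not considered
// globally visible, whereas GTF_ASG | GTF_GLOB_REF (an assignment touching global memory), or
// any node carrying GTF_CALL or GTF_EXCEPT, makes the macro evaluate to true.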
#if defined(DEBUG)
//------------------------------------------------------------------------
// GenTreeDebugFlags: a bitmask of debug-only flags for GenTree stored in gtDebugFlags
//
enum GenTreeDebugFlags : unsigned int
{
GTF_DEBUG_NONE = 0x00000000, // No debug flags.
GTF_DEBUG_NODE_MORPHED = 0x00000001, // the node has been morphed (in the global morphing phase)
GTF_DEBUG_NODE_SMALL = 0x00000002,
GTF_DEBUG_NODE_LARGE = 0x00000004,
GTF_DEBUG_NODE_CG_PRODUCED = 0x00000008, // genProduceReg has been called on this node
GTF_DEBUG_NODE_CG_CONSUMED = 0x00000010, // genConsumeReg has been called on this node
GTF_DEBUG_NODE_LSRA_ADDED = 0x00000020, // This node was added by LSRA
GTF_DEBUG_NODE_MASK = 0x0000003F, // These flags are all node (rather than operation) properties.
GTF_DEBUG_VAR_CSE_REF = 0x00800000, // GT_LCL_VAR -- This is a CSE LCL_VAR node
};
inline constexpr GenTreeDebugFlags operator ~(GenTreeDebugFlags a)
{
return (GenTreeDebugFlags)(~(unsigned int)a);
}
inline constexpr GenTreeDebugFlags operator |(GenTreeDebugFlags a, GenTreeDebugFlags b)
{
return (GenTreeDebugFlags)((unsigned int)a | (unsigned int)b);
}
inline constexpr GenTreeDebugFlags operator &(GenTreeDebugFlags a, GenTreeDebugFlags b)
{
return (GenTreeDebugFlags)((unsigned int)a & (unsigned int)b);
}
inline GenTreeDebugFlags& operator |=(GenTreeDebugFlags& a, GenTreeDebugFlags b)
{
return a = (GenTreeDebugFlags)((unsigned int)a | (unsigned int)b);
}
inline GenTreeDebugFlags& operator &=(GenTreeDebugFlags& a, GenTreeDebugFlags b)
{
return a = (GenTreeDebugFlags)((unsigned int)a & (unsigned int)b);
}
#endif // defined(DEBUG)
// clang-format on
constexpr bool OpersAreContiguous(genTreeOps firstOper, genTreeOps secondOper)
{
return (firstOper + 1) == secondOper;
}
template <typename... Opers>
constexpr bool OpersAreContiguous(genTreeOps firstOper, genTreeOps secondOper, Opers... otherOpers)
{
return OpersAreContiguous(firstOper, secondOper) && OpersAreContiguous(secondOper, otherOpers...);
}
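// For example, OpersAreContiguous(GT_EQ, GT_NE, GT_LT) is true exactly when
// GT_NE == GT_EQ + 1 and GT_LT == GT_NE + 1, i.e. when the opers are declared
// consecutively in gtlist.h.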
#ifndef HOST_64BIT
#include <pshpack4.h>
#endif
struct GenTree
{
// We use GT_STRUCT_0 only for the category of simple ops.
#define GTSTRUCT_0(fn, en) \
GenTree##fn* As##fn() \
{ \
assert(OperIsSimple()); \
return reinterpret_cast<GenTree##fn*>(this); \
} \
const GenTree##fn* As##fn() const \
{ \
assert(OperIsSimple()); \
return reinterpret_cast<const GenTree##fn*>(this); \
} \
GenTree##fn& As##fn##Ref() \
{ \
return *As##fn(); \
}
#define GTSTRUCT_N(fn, ...) \
GenTree##fn* As##fn() \
{ \
assert(OperIs(__VA_ARGS__)); \
return reinterpret_cast<GenTree##fn*>(this); \
} \
const GenTree##fn* As##fn() const \
{ \
assert(OperIs(__VA_ARGS__)); \
return reinterpret_cast<const GenTree##fn*>(this); \
} \
GenTree##fn& As##fn##Ref() \
{ \
return *As##fn(); \
}
#define GTSTRUCT_1(fn, en) GTSTRUCT_N(fn, en)
#define GTSTRUCT_2(fn, en, en2) GTSTRUCT_N(fn, en, en2)
#define GTSTRUCT_3(fn, en, en2, en3) GTSTRUCT_N(fn, en, en2, en3)
#define GTSTRUCT_4(fn, en, en2, en3, en4) GTSTRUCT_N(fn, en, en2, en3, en4)
#define GTSTRUCT_2_SPECIAL(fn, en, en2) GTSTRUCT_2(fn, en, en2)
#define GTSTRUCT_3_SPECIAL(fn, en, en2, en3) GTSTRUCT_3(fn, en, en2, en3)
#include "gtstructs.h"
genTreeOps gtOper; // enum subtype BYTE
var_types gtType; // enum subtype BYTE
genTreeOps OperGet() const
{
return gtOper;
}
var_types TypeGet() const
{
return gtType;
}
#ifdef DEBUG
genTreeOps gtOperSave; // Only used to save gtOper when we destroy a node, to aid debugging.
#endif
#define NO_CSE (0)
#define IS_CSE_INDEX(x) ((x) != 0)
#define IS_CSE_USE(x) ((x) > 0)
#define IS_CSE_DEF(x) ((x) < 0)
#define GET_CSE_INDEX(x) (((x) > 0) ? x : -(x))
#define TO_CSE_DEF(x) (-(x))
signed char gtCSEnum; // 0 or the CSE index (negated if def)
// valid only for CSE expressions
unsigned char gtLIRFlags; // Used for nodes that are in LIR. See LIR::Flags in lir.h for the various flags.
AssertionInfo gtAssertionInfo;
bool GeneratesAssertion() const
{
return gtAssertionInfo.HasAssertion();
}
void ClearAssertion()
{
gtAssertionInfo.Clear();
}
AssertionInfo GetAssertionInfo() const
{
return gtAssertionInfo;
}
void SetAssertionInfo(AssertionInfo info)
{
gtAssertionInfo = info;
}
//
// Cost metrics on the node. Don't allow direct access to the variable for setting.
//
public:
#ifdef DEBUG
// You are not allowed to read the cost values before they have been set in gtSetEvalOrder().
// Keep track of whether the costs have been initialized, and assert if they are read before being initialized.
// Obviously, this information does need to be initialized when a node is created.
// This is public so the dumpers can see it.
bool gtCostsInitialized;
#endif // DEBUG
#define MAX_COST UCHAR_MAX
#define IND_COST_EX 3 // execution cost for an indirection
unsigned char GetCostEx() const
{
assert(gtCostsInitialized);
return _gtCostEx;
}
unsigned char GetCostSz() const
{
assert(gtCostsInitialized);
return _gtCostSz;
}
// Set the costs. They are always both set at the same time.
// Don't use the "put" property: force calling this function, to make it more obvious in the few places
// that set the values.
// Note that costs are only set in gtSetEvalOrder() and its callees.
void SetCosts(unsigned costEx, unsigned costSz)
{
assert(costEx != (unsigned)-1); // looks bogus
assert(costSz != (unsigned)-1); // looks bogus
INDEBUG(gtCostsInitialized = true;)
_gtCostEx = (costEx > MAX_COST) ? MAX_COST : (unsigned char)costEx;
_gtCostSz = (costSz > MAX_COST) ? MAX_COST : (unsigned char)costSz;
}
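// Illustrative sketch (hypothetical values): a caller in gtSetEvalOrder() might set the costs of a
// simple indirection-like node as
//
//     tree->SetCosts(IND_COST_EX, 2);
//
// where both inputs are clamped to MAX_COST on assignment.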
// Optimized copy function, to avoid the SetCosts() function comparisons, and to make it clearer that a node copy is
// happening.
void CopyCosts(const GenTree* const tree)
{
// If the 'tree' costs aren't initialized, we'll hit an assert below.
INDEBUG(gtCostsInitialized = tree->gtCostsInitialized;)
_gtCostEx = tree->GetCostEx();
_gtCostSz = tree->GetCostSz();
}
// Same as CopyCosts, but avoids asserts if the costs we are copying have not been initialized.
// This is because the importer, for example, clones nodes, before these costs have been initialized.
// Note that we directly access the 'tree' costs, not going through the accessor functions (either
// directly or through the properties).
void CopyRawCosts(const GenTree* const tree)
{
INDEBUG(gtCostsInitialized = tree->gtCostsInitialized;)
_gtCostEx = tree->_gtCostEx;
_gtCostSz = tree->_gtCostSz;
}
private:
unsigned char _gtCostEx; // estimate of expression execution cost
unsigned char _gtCostSz; // estimate of expression code size cost
//
// Register or register pair number of the node.
//
CLANG_FORMAT_COMMENT_ANCHOR;
#ifdef DEBUG
public:
enum genRegTag
{
GT_REGTAG_NONE, // Nothing has been assigned to _gtRegNum
GT_REGTAG_REG // _gtRegNum has been assigned
};
genRegTag GetRegTag() const
{
assert(gtRegTag == GT_REGTAG_NONE || gtRegTag == GT_REGTAG_REG);
return gtRegTag;
}
private:
genRegTag gtRegTag; // What is in _gtRegNum?
#endif // DEBUG
private:
// This stores the register assigned to the node. If a register is not assigned, _gtRegNum is set to REG_NA.
regNumberSmall _gtRegNum;
// Count of operands. Used *only* by GenTreeMultiOp, exists solely due to padding constraints.
friend struct GenTreeMultiOp;
uint8_t m_operandCount;
public:
// The register number is stored in a small format (8 bits), but the getters return and the setters take
// a full-size (unsigned) format, to localize the casts here.
CLANG_FORMAT_COMMENT_ANCHOR;
#ifdef DEBUG
bool canBeContained() const;
#endif
// for codegen purposes, is this node a subnode of its parent
bool isContained() const;
bool isContainedIndir() const;
bool isIndirAddrMode();
// This returns true only for GT_IND and GT_STOREIND, and is used in contexts where a "true"
// indirection is expected (i.e. either a load to or a store from a single register).
// OperIsIndir() returns true also for indirection nodes such as GT_BLK, etc. as well as GT_NULLCHECK.
bool isIndir() const;
bool isContainedIntOrIImmed() const
{
return isContained() && IsCnsIntOrI() && !isUsedFromSpillTemp();
}
bool isContainedFltOrDblImmed() const
{
return isContained() && (OperGet() == GT_CNS_DBL);
}
bool isLclField() const
{
return OperGet() == GT_LCL_FLD || OperGet() == GT_STORE_LCL_FLD;
}
bool isUsedFromSpillTemp() const;
// Indicates whether it is a memory op.
// Right now it includes Indir and LclField ops.
bool isMemoryOp() const
{
return isIndir() || isLclField();
}
bool isUsedFromMemory() const
{
return ((isContained() && (isMemoryOp() || (OperGet() == GT_LCL_VAR) || (OperGet() == GT_CNS_DBL))) ||
isUsedFromSpillTemp());
}
bool isLclVarUsedFromMemory() const
{
return (OperGet() == GT_LCL_VAR) && (isContained() || isUsedFromSpillTemp());
}
bool isLclFldUsedFromMemory() const
{
return isLclField() && (isContained() || isUsedFromSpillTemp());
}
bool isUsedFromReg() const
{
return !isContained() && !isUsedFromSpillTemp();
}
regNumber GetRegNum() const
{
assert((gtRegTag == GT_REGTAG_REG) || (gtRegTag == GT_REGTAG_NONE)); // TODO-Cleanup: get rid of the NONE case,
// and fix everyplace that reads undefined
// values
regNumber reg = (regNumber)_gtRegNum;
assert((gtRegTag == GT_REGTAG_NONE) || // TODO-Cleanup: get rid of the NONE case, and fix everyplace that reads
// undefined values
(reg >= REG_FIRST && reg <= REG_COUNT));
return reg;
}
void SetRegNum(regNumber reg)
{
assert(reg >= REG_FIRST && reg <= REG_COUNT);
_gtRegNum = (regNumberSmall)reg;
INDEBUG(gtRegTag = GT_REGTAG_REG;)
assert(_gtRegNum == reg);
}
void ClearRegNum()
{
_gtRegNum = REG_NA;
INDEBUG(gtRegTag = GT_REGTAG_NONE;)
}
// Copy the _gtRegNum/gtRegTag fields
void CopyReg(GenTree* from);
bool gtHasReg(Compiler* comp) const;
int GetRegisterDstCount(Compiler* compiler) const;
regMaskTP gtGetRegMask() const;
regMaskTP gtGetContainedRegMask();
GenTreeFlags gtFlags;
#if defined(DEBUG)
GenTreeDebugFlags gtDebugFlags;
#endif // defined(DEBUG)
ValueNumPair gtVNPair;
regMaskSmall gtRsvdRegs; // set of fixed trashed registers
unsigned AvailableTempRegCount(regMaskTP mask = (regMaskTP)-1) const;
regNumber GetSingleTempReg(regMaskTP mask = (regMaskTP)-1);
regNumber ExtractTempReg(regMaskTP mask = (regMaskTP)-1);
void SetVNsFromNode(GenTree* tree)
{
gtVNPair = tree->gtVNPair;
}
ValueNum GetVN(ValueNumKind vnk) const
{
if (vnk == VNK_Liberal)
{
return gtVNPair.GetLiberal();
}
else
{
assert(vnk == VNK_Conservative);
return gtVNPair.GetConservative();
}
}
void SetVN(ValueNumKind vnk, ValueNum vn)
{
if (vnk == VNK_Liberal)
{
return gtVNPair.SetLiberal(vn);
}
else
{
assert(vnk == VNK_Conservative);
return gtVNPair.SetConservative(vn);
}
}
void SetVNs(ValueNumPair vnp)
{
gtVNPair = vnp;
}
void ClearVN()
{
gtVNPair = ValueNumPair(); // Initializes both elements to "NoVN".
}
GenTree* gtNext;
GenTree* gtPrev;
#ifdef DEBUG
unsigned gtTreeID;
unsigned gtSeqNum; // liveness traversal order within the current statement
int gtUseNum; // use-ordered traversal within the function
#endif
static const unsigned char gtOperKindTable[];
static unsigned OperKind(unsigned gtOper)
{
assert(gtOper < GT_COUNT);
return gtOperKindTable[gtOper];
}
unsigned OperKind() const
{
assert(gtOper < GT_COUNT);
return gtOperKindTable[gtOper];
}
static bool IsExOp(unsigned opKind)
{
return (opKind & GTK_EXOP) != 0;
}
bool IsValue() const
{
if ((OperKind(gtOper) & GTK_NOVALUE) != 0)
{
return false;
}
if (gtType == TYP_VOID)
{
// These are the only operators which can produce either VOID or non-VOID results.
assert(OperIs(GT_NOP, GT_CALL, GT_COMMA) || OperIsCompare() || OperIsLong() || OperIsSIMD() ||
OperIsHWIntrinsic());
return false;
}
return true;
}
// LIR flags
// These helper methods, along with the flag values they manipulate, are defined in lir.h
//
// UnusedValue indicates that, although this node produces a value, it is unused.
inline void SetUnusedValue();
inline void ClearUnusedValue();
inline bool IsUnusedValue() const;
// RegOptional indicates that codegen can still generate code even if it isn't allocated a register.
inline bool IsRegOptional() const;
inline void SetRegOptional();
inline void ClearRegOptional();
#ifdef DEBUG
void dumpLIRFlags();
#endif
bool TypeIs(var_types type) const
{
return gtType == type;
}
template <typename... T>
bool TypeIs(var_types type, T... rest) const
{
return TypeIs(type) || TypeIs(rest...);
}
static bool StaticOperIs(genTreeOps operCompare, genTreeOps oper)
{
return operCompare == oper;
}
template <typename... T>
static bool StaticOperIs(genTreeOps operCompare, genTreeOps oper, T... rest)
{
return StaticOperIs(operCompare, oper) || StaticOperIs(operCompare, rest...);
}
bool OperIs(genTreeOps oper) const
{
return OperGet() == oper;
}
template <typename... T>
bool OperIs(genTreeOps oper, T... rest) const
{
return OperIs(oper) || OperIs(rest...);
}
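// Illustrative usage sketch (hypothetical call site): the variadic TypeIs/OperIs overloads above let a
// caller test several values in one call, e.g.
//
//     if (tree->OperIs(GT_ADD, GT_SUB) && tree->TypeIs(TYP_INT, TYP_LONG))
//     {
//         // handle an integral add or sub
//     }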
static bool OperIsConst(genTreeOps gtOper)
{
static_assert_no_msg(OpersAreContiguous(GT_CNS_INT, GT_CNS_LNG, GT_CNS_DBL, GT_CNS_STR));
return (GT_CNS_INT <= gtOper) && (gtOper <= GT_CNS_STR);
}
bool OperIsConst() const
{
return OperIsConst(gtOper);
}
static bool OperIsLeaf(genTreeOps gtOper)
{
return (OperKind(gtOper) & GTK_LEAF) != 0;
}
bool OperIsLeaf() const
{
return (OperKind(gtOper) & GTK_LEAF) != 0;
}
static bool OperIsLocal(genTreeOps gtOper)
{
static_assert_no_msg(
OpersAreContiguous(GT_PHI_ARG, GT_LCL_VAR, GT_LCL_FLD, GT_STORE_LCL_VAR, GT_STORE_LCL_FLD));
return (GT_PHI_ARG <= gtOper) && (gtOper <= GT_STORE_LCL_FLD);
}
static bool OperIsLocalAddr(genTreeOps gtOper)
{
return (gtOper == GT_LCL_VAR_ADDR || gtOper == GT_LCL_FLD_ADDR);
}
static bool OperIsLocalField(genTreeOps gtOper)
{
return (gtOper == GT_LCL_FLD || gtOper == GT_LCL_FLD_ADDR || gtOper == GT_STORE_LCL_FLD);
}
inline bool OperIsLocalField() const
{
return OperIsLocalField(gtOper);
}
static bool OperIsScalarLocal(genTreeOps gtOper)
{
return (gtOper == GT_LCL_VAR || gtOper == GT_STORE_LCL_VAR);
}
static bool OperIsNonPhiLocal(genTreeOps gtOper)
{
return OperIsLocal(gtOper) && (gtOper != GT_PHI_ARG);
}
static bool OperIsLocalRead(genTreeOps gtOper)
{
return (OperIsLocal(gtOper) && !OperIsLocalStore(gtOper));
}
static bool OperIsLocalStore(genTreeOps gtOper)
{
return (gtOper == GT_STORE_LCL_VAR || gtOper == GT_STORE_LCL_FLD);
}
static bool OperIsAddrMode(genTreeOps gtOper)
{
return (gtOper == GT_LEA);
}
static bool OperIsInitVal(genTreeOps gtOper)
{
return (gtOper == GT_INIT_VAL);
}
bool OperIsInitVal() const
{
return OperIsInitVal(OperGet());
}
bool IsConstInitVal() const
{
return (gtOper == GT_CNS_INT) || (OperIsInitVal() && (gtGetOp1()->gtOper == GT_CNS_INT));
}
bool OperIsBlkOp();
bool OperIsCopyBlkOp();
bool OperIsInitBlkOp();
static bool OperIsBlk(genTreeOps gtOper)
{
return (gtOper == GT_BLK) || (gtOper == GT_OBJ) || OperIsStoreBlk(gtOper);
}
bool OperIsBlk() const
{
return OperIsBlk(OperGet());
}
static bool OperIsStoreBlk(genTreeOps gtOper)
{
return StaticOperIs(gtOper, GT_STORE_BLK, GT_STORE_OBJ, GT_STORE_DYN_BLK);
}
bool OperIsStoreBlk() const
{
return OperIsStoreBlk(OperGet());
}
bool OperIsPutArgSplit() const
{
#if FEATURE_ARG_SPLIT
assert((gtOper != GT_PUTARG_SPLIT) || compFeatureArgSplit());
return gtOper == GT_PUTARG_SPLIT;
#else // !FEATURE_ARG_SPLIT
return false;
#endif
}
bool OperIsPutArgStk() const
{
return gtOper == GT_PUTARG_STK;
}
bool OperIsPutArgStkOrSplit() const
{
return OperIsPutArgStk() || OperIsPutArgSplit();
}
bool OperIsPutArgReg() const
{
return gtOper == GT_PUTARG_REG;
}
bool OperIsPutArg() const
{
return OperIsPutArgStk() || OperIsPutArgReg() || OperIsPutArgSplit();
}
bool OperIsFieldList() const
{
return OperIs(GT_FIELD_LIST);
}
bool OperIsMultiRegOp() const
{
#if !defined(TARGET_64BIT)
if (OperIs(GT_MUL_LONG))
{
return true;
}
#if defined(TARGET_ARM)
if (OperIs(GT_PUTARG_REG, GT_BITCAST))
{
return true;
}
#endif // TARGET_ARM
#endif // TARGET_64BIT
return false;
}
bool OperIsAddrMode() const
{
return OperIsAddrMode(OperGet());
}
bool OperIsLocal() const
{
return OperIsLocal(OperGet());
}
bool OperIsLocalAddr() const
{
return OperIsLocalAddr(OperGet());
}
bool OperIsScalarLocal() const
{
return OperIsScalarLocal(OperGet());
}
bool OperIsNonPhiLocal() const
{
return OperIsNonPhiLocal(OperGet());
}
bool OperIsLocalStore() const
{
return OperIsLocalStore(OperGet());
}
bool OperIsLocalRead() const
{
return OperIsLocalRead(OperGet());
}
static bool OperIsCompare(genTreeOps gtOper)
{
static_assert_no_msg(OpersAreContiguous(GT_EQ, GT_NE, GT_LT, GT_LE, GT_GE, GT_GT, GT_TEST_EQ, GT_TEST_NE));
return (GT_EQ <= gtOper) && (gtOper <= GT_TEST_NE);
}
bool OperIsCompare() const
{
return OperIsCompare(OperGet());
}
static bool OperIsShift(genTreeOps gtOper)
{
return (gtOper == GT_LSH) || (gtOper == GT_RSH) || (gtOper == GT_RSZ);
}
bool OperIsShift() const
{
return OperIsShift(OperGet());
}
static bool OperIsShiftLong(genTreeOps gtOper)
{
#ifdef TARGET_64BIT
return false;
#else
return (gtOper == GT_LSH_HI) || (gtOper == GT_RSH_LO);
#endif
}
bool OperIsShiftLong() const
{
return OperIsShiftLong(OperGet());
}
static bool OperIsRotate(genTreeOps gtOper)
{
return (gtOper == GT_ROL) || (gtOper == GT_ROR);
}
bool OperIsRotate() const
{
return OperIsRotate(OperGet());
}
static bool OperIsShiftOrRotate(genTreeOps gtOper)
{
return OperIsShift(gtOper) || OperIsRotate(gtOper) || OperIsShiftLong(gtOper);
}
bool OperIsShiftOrRotate() const
{
return OperIsShiftOrRotate(OperGet());
}
static bool OperIsMul(genTreeOps gtOper)
{
return (gtOper == GT_MUL) || (gtOper == GT_MULHI)
#if !defined(TARGET_64BIT) || defined(TARGET_ARM64)
|| (gtOper == GT_MUL_LONG)
#endif
;
}
bool OperIsMul() const
{
return OperIsMul(gtOper);
}
bool OperIsArithmetic() const
{
genTreeOps op = OperGet();
return op == GT_ADD || op == GT_SUB || op == GT_MUL || op == GT_DIV || op == GT_MOD
|| op == GT_UDIV || op == GT_UMOD
|| op == GT_OR || op == GT_XOR || op == GT_AND
|| OperIsShiftOrRotate(op);
}
#ifdef TARGET_XARCH
static bool OperIsRMWMemOp(genTreeOps gtOper)
{
// Returns true if the binary op is one of the supported operations for RMW of memory.
return (gtOper == GT_ADD || gtOper == GT_SUB || gtOper == GT_AND || gtOper == GT_OR || gtOper == GT_XOR ||
gtOper == GT_NOT || gtOper == GT_NEG || OperIsShiftOrRotate(gtOper));
}
bool OperIsRMWMemOp() const
{
// Returns true if the binary op is one of the supported operations for RMW of memory.
return OperIsRMWMemOp(gtOper);
}
#endif // TARGET_XARCH
static bool OperIsUnary(genTreeOps gtOper)
{
return (OperKind(gtOper) & GTK_UNOP) != 0;
}
bool OperIsUnary() const
{
return OperIsUnary(gtOper);
}
static bool OperIsBinary(genTreeOps gtOper)
{
return (OperKind(gtOper) & GTK_BINOP) != 0;
}
bool OperIsBinary() const
{
return OperIsBinary(gtOper);
}
static bool OperIsSimple(genTreeOps gtOper)
{
return (OperKind(gtOper) & GTK_SMPOP) != 0;
}
static bool OperIsSpecial(genTreeOps gtOper)
{
return ((OperKind(gtOper) & GTK_KINDMASK) == GTK_SPECIAL);
}
bool OperIsSimple() const
{
return OperIsSimple(gtOper);
}
#ifdef FEATURE_SIMD
bool isCommutativeSIMDIntrinsic();
#else // !FEATURE_SIMD
bool isCommutativeSIMDIntrinsic()
{
return false;
}
#endif // FEATURE_SIMD
#ifdef FEATURE_HW_INTRINSICS
bool isCommutativeHWIntrinsic() const;
bool isContainableHWIntrinsic() const;
bool isRMWHWIntrinsic(Compiler* comp);
#else
bool isCommutativeHWIntrinsic() const
{
return false;
}
bool isContainableHWIntrinsic() const
{
return false;
}
bool isRMWHWIntrinsic(Compiler* comp)
{
return false;
}
#endif // FEATURE_HW_INTRINSICS
static bool OperIsCommutative(genTreeOps gtOper)
{
return (OperKind(gtOper) & GTK_COMMUTE) != 0;
}
bool OperIsCommutative()
{
return OperIsCommutative(gtOper) || (OperIsSIMD(gtOper) && isCommutativeSIMDIntrinsic()) ||
(OperIsHWIntrinsic(gtOper) && isCommutativeHWIntrinsic());
}
static bool OperMayOverflow(genTreeOps gtOper)
{
return ((gtOper == GT_ADD) || (gtOper == GT_SUB) || (gtOper == GT_MUL) || (gtOper == GT_CAST)
#if !defined(TARGET_64BIT)
|| (gtOper == GT_ADD_HI) || (gtOper == GT_SUB_HI)
#endif
);
}
bool OperMayOverflow() const
{
return OperMayOverflow(gtOper);
}
// This returns true only for GT_IND and GT_STOREIND, and is used in contexts where a "true"
// indirection is expected (i.e. either a load to or a store from a single register).
// OperIsIndir() returns true also for indirection nodes such as GT_BLK, etc. as well as GT_NULLCHECK.
static bool OperIsIndir(genTreeOps gtOper)
{
return gtOper == GT_IND || gtOper == GT_STOREIND || gtOper == GT_NULLCHECK || OperIsBlk(gtOper);
}
static bool OperIsIndirOrArrLength(genTreeOps gtOper)
{
return OperIsIndir(gtOper) || (gtOper == GT_ARR_LENGTH);
}
bool OperIsIndir() const
{
return OperIsIndir(gtOper);
}
bool OperIsIndirOrArrLength() const
{
return OperIsIndirOrArrLength(gtOper);
}
bool OperIsImplicitIndir() const;
static bool OperIsAtomicOp(genTreeOps gtOper)
{
switch (gtOper)
{
case GT_XADD:
case GT_XORR:
case GT_XAND:
case GT_XCHG:
case GT_LOCKADD:
case GT_CMPXCHG:
return true;
default:
return false;
}
}
bool OperIsAtomicOp() const
{
return OperIsAtomicOp(gtOper);
}
bool OperIsStore() const
{
return OperIsStore(gtOper);
}
static bool OperIsStore(genTreeOps gtOper)
{
return (gtOper == GT_STOREIND || gtOper == GT_STORE_LCL_VAR || gtOper == GT_STORE_LCL_FLD ||
OperIsStoreBlk(gtOper) || OperIsAtomicOp(gtOper));
}
static bool OperIsMultiOp(genTreeOps gtOper)
{
return OperIsSIMD(gtOper) || OperIsHWIntrinsic(gtOper);
}
bool OperIsMultiOp() const
{
return OperIsMultiOp(OperGet());
}
// This is here for cleaner FEATURE_SIMD #ifdefs.
static bool OperIsSIMD(genTreeOps gtOper)
{
#ifdef FEATURE_SIMD
return gtOper == GT_SIMD;
#else // !FEATURE_SIMD
return false;
#endif // !FEATURE_SIMD
}
bool OperIsSIMD() const
{
return OperIsSIMD(gtOper);
}
static bool OperIsHWIntrinsic(genTreeOps gtOper)
{
#ifdef FEATURE_HW_INTRINSICS
return gtOper == GT_HWINTRINSIC;
#else
return false;
#endif // FEATURE_HW_INTRINSICS
}
bool OperIsHWIntrinsic() const
{
return OperIsHWIntrinsic(gtOper);
}
bool OperIsSimdOrHWintrinsic() const
{
return OperIsSIMD() || OperIsHWIntrinsic();
}
// This is here for cleaner GT_LONG #ifdefs.
static bool OperIsLong(genTreeOps gtOper)
{
#if defined(TARGET_64BIT)
return false;
#else
return gtOper == GT_LONG;
#endif
}
bool OperIsLong() const
{
return OperIsLong(gtOper);
}
bool OperIsConditionalJump() const
{
return (gtOper == GT_JTRUE) || (gtOper == GT_JCMP) || (gtOper == GT_JCC);
}
#ifdef DEBUG
static const GenTreeDebugOperKind gtDebugOperKindTable[];
static GenTreeDebugOperKind DebugOperKind(genTreeOps oper)
{
assert(oper < GT_COUNT);
return gtDebugOperKindTable[oper];
}
GenTreeDebugOperKind DebugOperKind() const
{
return DebugOperKind(OperGet());
}
bool NullOp1Legal() const
{
assert(OperIsSimple());
switch (gtOper)
{
case GT_LEA:
case GT_RETFILT:
case GT_NOP:
case GT_FIELD:
return true;
case GT_RETURN:
return gtType == TYP_VOID;
default:
return false;
}
}
bool NullOp2Legal() const
{
assert(OperIsSimple(gtOper) || OperIsBlk(gtOper));
if (!OperIsBinary(gtOper))
{
return true;
}
switch (gtOper)
{
case GT_INTRINSIC:
case GT_LEA:
#if defined(TARGET_ARM)
case GT_PUTARG_REG:
#endif // defined(TARGET_ARM)
return true;
default:
return false;
}
}
bool OperIsLIR() const
{
if (OperIs(GT_NOP))
{
// NOPs may only be present in LIR if they do not produce a value.
return IsNothingNode();
}
return (DebugOperKind() & DBK_NOTLIR) == 0;
}
bool OperSupportsReverseOpEvalOrder(Compiler* comp) const;
static bool RequiresNonNullOp2(genTreeOps oper);
bool IsValidCallArgument();
#endif // DEBUG
inline bool IsFPZero() const;
inline bool IsIntegralConst(ssize_t constVal) const;
inline bool IsIntegralConstVector(ssize_t constVal) const;
inline bool IsSIMDZero() const;
inline bool IsFloatPositiveZero() const;
inline bool IsVectorZero() const;
inline bool IsBoxedValue();
inline GenTree* gtGetOp1() const;
// Directly return op2. Asserts the node is binary. Might return nullptr if the binary node allows
// a nullptr op2, such as GT_LEA. This is more efficient than gtGetOp2IfPresent() if you know what
// node type you have.
inline GenTree* gtGetOp2() const;
// The returned pointer might be nullptr if the node is not binary, or if non-null op2 is not required.
inline GenTree* gtGetOp2IfPresent() const;
bool TryGetUse(GenTree* operand, GenTree*** pUse);
bool TryGetUse(GenTree* operand)
{
GenTree** unusedUse = nullptr;
return TryGetUse(operand, &unusedUse);
}
private:
bool TryGetUseBinOp(GenTree* operand, GenTree*** pUse);
public:
GenTree* gtGetParent(GenTree*** pUse);
void ReplaceOperand(GenTree** useEdge, GenTree* replacement);
inline GenTree* gtEffectiveVal(bool commaOnly = false);
inline GenTree* gtCommaAssignVal();
// Tunnel through any GT_RET_EXPRs
GenTree* gtRetExprVal(BasicBlockFlags* pbbFlags = nullptr);
inline GenTree* gtSkipPutArgType();
// Return the child of this node if it is a GT_RELOAD or GT_COPY; otherwise simply return the node itself
inline GenTree* gtSkipReloadOrCopy();
// Returns true if it is a call node returning its value in more than one register
inline bool IsMultiRegCall() const;
// Returns true if it is a struct lclVar node residing in multiple registers.
inline bool IsMultiRegLclVar() const;
// Returns true if it is a node returning its value in more than one register
bool IsMultiRegNode() const;
// Returns the number of registers defined by a multireg node.
unsigned GetMultiRegCount(Compiler* comp) const;
// Returns the regIndex'th register defined by a possibly-multireg node.
regNumber GetRegByIndex(int regIndex) const;
// Returns the type of the regIndex'th register defined by a multi-reg node.
var_types GetRegTypeByIndex(int regIndex) const;
// Returns the GTF flag equivalent for the regIndex'th register of a multi-reg node.
GenTreeFlags GetRegSpillFlagByIdx(int regIndex) const;
// Last-use information for either GenTreeLclVar or GenTreeCopyOrReload nodes.
private:
GenTreeFlags GetLastUseBit(int regIndex) const;
public:
bool IsLastUse(int regIndex) const;
bool HasLastUse() const;
void SetLastUse(int regIndex);
void ClearLastUse(int regIndex);
// Returns true if it is a GT_COPY or GT_RELOAD node
inline bool IsCopyOrReload() const;
// Returns true if it is a GT_COPY or GT_RELOAD of a multi-reg call node
inline bool IsCopyOrReloadOfMultiRegCall() const;
bool OperRequiresAsgFlag();
bool OperRequiresCallFlag(Compiler* comp);
bool OperMayThrow(Compiler* comp);
unsigned GetScaleIndexMul();
unsigned GetScaleIndexShf();
unsigned GetScaledIndex();
public:
static unsigned char s_gtNodeSizes[];
#if NODEBASH_STATS || MEASURE_NODE_SIZE || COUNT_AST_OPERS
static unsigned char s_gtTrueSizes[];
#endif
#if COUNT_AST_OPERS
static unsigned s_gtNodeCounts[];
#endif
static void InitNodeSize();
size_t GetNodeSize() const;
bool IsNodeProperlySized() const;
void ReplaceWith(GenTree* src, Compiler* comp);
static genTreeOps ReverseRelop(genTreeOps relop);
static genTreeOps SwapRelop(genTreeOps relop);
//---------------------------------------------------------------------
static bool Compare(GenTree* op1, GenTree* op2, bool swapOK = false);
//---------------------------------------------------------------------
#if defined(DEBUG) || NODEBASH_STATS || MEASURE_NODE_SIZE || COUNT_AST_OPERS || DUMP_FLOWGRAPHS
static const char* OpName(genTreeOps op);
#endif
#if MEASURE_NODE_SIZE
static const char* OpStructName(genTreeOps op);
#endif
//---------------------------------------------------------------------
bool IsNothingNode() const;
void gtBashToNOP();
// Value number update action enumeration
enum ValueNumberUpdate
{
CLEAR_VN, // Clear value number
PRESERVE_VN // Preserve value number
};
void SetOper(genTreeOps oper, ValueNumberUpdate vnUpdate = CLEAR_VN); // set gtOper
void SetOperResetFlags(genTreeOps oper); // set gtOper and reset flags
// set gtOper and only keep GTF_COMMON_MASK flags
void ChangeOper(genTreeOps oper, ValueNumberUpdate vnUpdate = CLEAR_VN);
void ChangeOperUnchecked(genTreeOps oper);
void SetOperRaw(genTreeOps oper);
void ChangeType(var_types newType)
{
var_types oldType = gtType;
gtType = newType;
GenTree* node = this;
while (node->gtOper == GT_COMMA)
{
node = node->gtGetOp2();
if (node->gtType != newType)
{
assert(node->gtType == oldType);
node->gtType = newType;
}
}
}
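// Illustrative note on ChangeType (assumption about a typical shape): because COMMA nodes take the type of
// their value operand, the loop above also retypes the op2 chain. For example, retyping
// COMMA(sideEffect, LCL_VAR) walks to the LCL_VAR, asserts it had the old type, and updates it as well.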
template <typename T>
void BashToConst(T value, var_types type = TYP_UNDEF);
void BashToZeroConst(var_types type);
#if NODEBASH_STATS
static void RecordOperBashing(genTreeOps operOld, genTreeOps operNew);
static void ReportOperBashing(FILE* fp);
#else
static void RecordOperBashing(genTreeOps operOld, genTreeOps operNew)
{ /* do nothing */
}
static void ReportOperBashing(FILE* fp)
{ /* do nothing */
}
#endif
bool IsLocal() const
{
return OperIsLocal(OperGet());
}
// Returns "true" iff 'this' is a GT_LCL_FLD or GT_STORE_LCL_FLD on which the type
// is not the same size as the type of the GT_LCL_VAR.
bool IsPartialLclFld(Compiler* comp);
// Returns "true" iff "this" defines a local variable. Requires "comp" to be the
// current compilation. If returns "true", sets "*pLclVarTree" to the
// tree for the local that is defined, and, if "pIsEntire" is non-null, sets "*pIsEntire" to
// true or false, depending on whether the assignment writes to the entirety of the local
// variable, or just a portion of it.
bool DefinesLocal(Compiler* comp, GenTreeLclVarCommon** pLclVarTree, bool* pIsEntire = nullptr);
bool IsLocalAddrExpr(Compiler* comp,
GenTreeLclVarCommon** pLclVarTree,
FieldSeqNode** pFldSeq,
ssize_t* pOffset = nullptr);
// Simpler variant of the above which just returns the local node if this is an expression that
// yields an address into a local
GenTreeLclVarCommon* IsLocalAddrExpr();
// Determine if this tree represents the value of an entire implicit byref parameter,
// and if so return the tree for the parameter.
GenTreeLclVar* IsImplicitByrefParameterValue(Compiler* compiler);
// Determine if this is a LclVarCommon node and return some additional info about it in the
// two out parameters.
bool IsLocalExpr(Compiler* comp, GenTreeLclVarCommon** pLclVarTree, FieldSeqNode** pFldSeq);
// Determine whether this is an assignment tree of the form X = X (op) Y,
// where Y is an arbitrary tree, and X is a lclVar.
unsigned IsLclVarUpdateTree(GenTree** otherTree, genTreeOps* updateOper);
bool IsFieldAddr(Compiler* comp, GenTree** pBaseAddr, FieldSeqNode** pFldSeq);
// Requires "this" to be the address of an array (the child of a GT_IND labeled with GTF_IND_ARR_INDEX).
// Sets "pArr" to the node representing the array (either an array object pointer, or perhaps a byref to the some
// element).
// Sets "*pArrayType" to the class handle for the array type.
// Sets "*inxVN" to the value number inferred for the array index.
// Sets "*pFldSeq" to the sequence, if any, of struct fields used to index into the array element.
void ParseArrayAddress(
Compiler* comp, struct ArrayInfo* arrayInfo, GenTree** pArr, ValueNum* pInxVN, FieldSeqNode** pFldSeq);
// Helper method for the above.
void ParseArrayAddressWork(Compiler* comp,
target_ssize_t inputMul,
GenTree** pArr,
ValueNum* pInxVN,
target_ssize_t* pOffset,
FieldSeqNode** pFldSeq);
// Requires "this" to be a GT_IND. Requires the outermost caller to set "*pFldSeq" to nullptr.
// Returns true if it is an array index expression, or access to a (sequence of) struct field(s)
// within a struct array element. If it returns true, sets *arrayInfo to the array information, and sets *pFldSeq
// to the sequence of struct field accesses.
bool ParseArrayElemForm(Compiler* comp, ArrayInfo* arrayInfo, FieldSeqNode** pFldSeq);
// Requires "this" to be the address of a (possible) array element (or struct field within that).
// If it is, sets "*arrayInfo" to the array access info, "*pFldSeq" to the sequence of struct fields
// accessed within the array element, and returns true. If not, returns "false".
bool ParseArrayElemAddrForm(Compiler* comp, ArrayInfo* arrayInfo, FieldSeqNode** pFldSeq);
// Requires "this" to be an int expression. If it is a sequence of one or more integer constants added together,
// returns true and sets "*pFldSeq" to the sequence of fields with which those constants are annotated.
bool ParseOffsetForm(Compiler* comp, FieldSeqNode** pFldSeq);
// Labels "*this" as an array index expression: label all constants and variables that could contribute, as part of
// an affine expression, to the value of the index.
void LabelIndex(Compiler* comp, bool isConst = true);
// Assumes that "this" occurs in a context where it is being dereferenced as the LHS of an assignment-like
// statement (assignment, initblk, or copyblk). The "width" should be the number of bytes copied by the
// operation. Returns "true" if "this" is an address of (or within)
// a local variable; sets "*pLclVarTree" to that local variable instance; and, if "pIsEntire" is non-null,
// sets "*pIsEntire" to true if this assignment writes the full width of the local.
bool DefinesLocalAddr(Compiler* comp, unsigned width, GenTreeLclVarCommon** pLclVarTree, bool* pIsEntire);
// These are only used for dumping.
// The GetRegNum() is only valid in LIR, but the dumping methods are not easily
// modified to check this.
CLANG_FORMAT_COMMENT_ANCHOR;
#ifdef DEBUG
bool InReg() const
{
return (GetRegTag() != GT_REGTAG_NONE) ? true : false;
}
regNumber GetReg() const
{
return (GetRegTag() != GT_REGTAG_NONE) ? GetRegNum() : REG_NA;
}
#endif
static bool IsContained(unsigned flags)
{
return ((flags & GTF_CONTAINED) != 0);
}
void SetContained()
{
assert(IsValue());
gtFlags |= GTF_CONTAINED;
assert(isContained());
}
void ClearContained()
{
assert(IsValue());
gtFlags &= ~GTF_CONTAINED;
ClearRegOptional();
}
bool CanCSE() const
{
return ((gtFlags & GTF_DONT_CSE) == 0);
}
void SetDoNotCSE()
{
gtFlags |= GTF_DONT_CSE;
}
void ClearDoNotCSE()
{
gtFlags &= ~GTF_DONT_CSE;
}
bool IsReverseOp() const
{
return (gtFlags & GTF_REVERSE_OPS) ? true : false;
}
void SetReverseOp()
{
gtFlags |= GTF_REVERSE_OPS;
}
void ClearReverseOp()
{
gtFlags &= ~GTF_REVERSE_OPS;
}
bool IsUnsigned() const
{
return ((gtFlags & GTF_UNSIGNED) != 0);
}
void SetUnsigned()
{
assert(OperIs(GT_ADD, GT_SUB, GT_CAST, GT_LE, GT_LT, GT_GT, GT_GE) || OperIsMul());
gtFlags |= GTF_UNSIGNED;
}
void ClearUnsigned()
{
assert(OperIs(GT_ADD, GT_SUB, GT_CAST) || OperIsMul());
gtFlags &= ~GTF_UNSIGNED;
}
void SetOverflow()
{
assert(OperMayOverflow());
gtFlags |= GTF_OVERFLOW;
}
void ClearOverflow()
{
assert(OperMayOverflow());
gtFlags &= ~GTF_OVERFLOW;
}
bool Is64RsltMul() const
{
return (gtFlags & GTF_MUL_64RSLT) != 0;
}
void Set64RsltMul()
{
gtFlags |= GTF_MUL_64RSLT;
}
void Clear64RsltMul()
{
gtFlags &= ~GTF_MUL_64RSLT;
}
void SetAllEffectsFlags(GenTree* source)
{
SetAllEffectsFlags(source->gtFlags & GTF_ALL_EFFECT);
}
void SetAllEffectsFlags(GenTree* firstSource, GenTree* secondSource)
{
SetAllEffectsFlags((firstSource->gtFlags | secondSource->gtFlags) & GTF_ALL_EFFECT);
}
void SetAllEffectsFlags(GenTree* firstSource, GenTree* secondSource, GenTree* thirdSource)
{
SetAllEffectsFlags((firstSource->gtFlags | secondSource->gtFlags | thirdSource->gtFlags) & GTF_ALL_EFFECT);
}
void SetAllEffectsFlags(GenTreeFlags sourceFlags)
{
assert((sourceFlags & ~GTF_ALL_EFFECT) == 0);
gtFlags &= ~GTF_ALL_EFFECT;
gtFlags |= sourceFlags;
}
inline bool IsCnsIntOrI() const;
inline bool IsIntegralConst() const;
inline bool IsIntCnsFitsInI32(); // Constant fits in INT32
inline bool IsCnsFltOrDbl() const;
inline bool IsCnsNonZeroFltOrDbl() const;
bool IsIconHandle() const
{
assert(gtOper == GT_CNS_INT);
return (gtFlags & GTF_ICON_HDL_MASK) ? true : false;
}
bool IsIconHandle(GenTreeFlags handleType) const
{
assert(gtOper == GT_CNS_INT);
assert((handleType & GTF_ICON_HDL_MASK) != 0); // check that handleType is one of the valid GTF_ICON_* values
assert((handleType & ~GTF_ICON_HDL_MASK) == 0);
return (gtFlags & GTF_ICON_HDL_MASK) == handleType;
}
// Return just the part of the flags corresponding to the GTF_ICON_*_HDL flag. For example,
// GTF_ICON_SCOPE_HDL. The tree node must be a const int, but it might not be a handle, in which
// case we'll return zero.
GenTreeFlags GetIconHandleFlag() const
{
assert(gtOper == GT_CNS_INT);
return (gtFlags & GTF_ICON_HDL_MASK);
}
// Mark this node as no longer being a handle; clear its GTF_ICON_*_HDL bits.
void ClearIconHandleMask()
{
assert(gtOper == GT_CNS_INT);
gtFlags &= ~GTF_ICON_HDL_MASK;
}
// Return true if the two GT_CNS_INT trees have the same handle flag (GTF_ICON_*_HDL).
static bool SameIconHandleFlag(GenTree* t1, GenTree* t2)
{
return t1->GetIconHandleFlag() == t2->GetIconHandleFlag();
}
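// Illustrative usage sketch (assuming GTF_ICON_CLASS_HDL is one of the GTF_ICON_*_HDL kinds): testing a
// constant for a specific handle kind and then stripping the handle bits:
//
//     if (icon->IsIconHandle(GTF_ICON_CLASS_HDL))
//     {
//         icon->ClearIconHandleMask();
//     }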
bool IsArgPlaceHolderNode() const
{
return OperGet() == GT_ARGPLACE;
}
bool IsCall() const
{
return OperGet() == GT_CALL;
}
inline bool IsHelperCall();
bool gtOverflow() const;
bool gtOverflowEx() const;
bool gtSetFlags() const;
bool gtRequestSetFlags();
#ifdef DEBUG
static int gtDispFlags(GenTreeFlags flags, GenTreeDebugFlags debugFlags);
#endif
// cast operations
inline var_types CastFromType();
inline var_types& CastToType();
// Returns "true" iff "this" is a phi-related node (i.e. a GT_PHI_ARG, GT_PHI, or a PhiDefn).
bool IsPhiNode();
// Returns "true" iff "*this" is an assignment (GT_ASG) tree that defines an SSA name (lcl = phi(...));
bool IsPhiDefn();
// Returns "true" iff "*this" is a statement containing an assignment that defines an SSA name (lcl = phi(...));
// Because we hid the assignment operator of "BitSet" (in DEBUG),
// we can't synthesize an assignment operator.
// TODO-Cleanup: Could change this w/o liveset on tree nodes
// (This is also necessary for the VTable trick.)
GenTree()
{
}
// Returns an iterator that will produce the use edge to each operand of this node. Differs
// from the sequence of nodes produced by a loop over `GetChild` in its handling of call, phi,
// and block op nodes.
GenTreeUseEdgeIterator UseEdgesBegin();
GenTreeUseEdgeIterator UseEdgesEnd();
IteratorPair<GenTreeUseEdgeIterator> UseEdges();
// Returns an iterator that will produce each operand of this node, in execution order.
GenTreeOperandIterator OperandsBegin();
GenTreeOperandIterator OperandsEnd();
// Returns a range that will produce the operands of this node in execution order.
IteratorPair<GenTreeOperandIterator> Operands();
enum class VisitResult
{
Abort = false,
Continue = true
};
// Visits each operand of this node. The visitor must be either a lambda, function, or functor with the signature
// `GenTree::VisitResult VisitorFunction(GenTree* operand)`. Here is a simple example:
//
// unsigned operandCount = 0;
// node->VisitOperands([&](GenTree* operand) -> GenTree::VisitResult
// {
// operandCount++;
// return GenTree::VisitResult::Continue;
// });
//
// This function is generally more efficient than the operand iterator and should be preferred over that API for
// hot code, as it affords better opportunities for inlining and achieves shorter dynamic path lengths when
// deciding how operands need to be accessed.
//
// Note that this function does not respect `GTF_REVERSE_OPS`. This is always safe in LIR, but may be dangerous
// in HIR if for some reason you need to visit operands in the order in which they will execute.
template <typename TVisitor>
void VisitOperands(TVisitor visitor);
private:
template <typename TVisitor>
void VisitBinOpOperands(TVisitor visitor);
public:
bool Precedes(GenTree* other);
bool IsInvariant() const;
bool IsNeverNegative(Compiler* comp) const;
bool IsReuseRegVal() const
{
// This can be extended to non-constant nodes, but not to local or indir nodes.
if (IsInvariant() && ((gtFlags & GTF_REUSE_REG_VAL) != 0))
{
return true;
}
return false;
}
void SetReuseRegVal()
{
assert(IsInvariant());
gtFlags |= GTF_REUSE_REG_VAL;
}
void ResetReuseRegVal()
{
assert(IsInvariant());
gtFlags &= ~GTF_REUSE_REG_VAL;
}
void SetIndirExceptionFlags(Compiler* comp);
#if MEASURE_NODE_SIZE
static void DumpNodeSizes(FILE* fp);
#endif
#ifdef DEBUG
private:
GenTree& operator=(const GenTree& gt)
{
assert(!"Don't copy");
return *this;
}
#endif // DEBUG
#if DEBUGGABLE_GENTREE
// In DEBUG builds, add a dummy virtual method, to give the debugger run-time type information.
virtual void DummyVirt()
{
}
typedef void* VtablePtr;
VtablePtr GetVtableForOper(genTreeOps oper);
void SetVtableForOper(genTreeOps oper);
static VtablePtr s_vtablesForOpers[GT_COUNT];
static VtablePtr s_vtableForOp;
#endif // DEBUGGABLE_GENTREE
public:
inline void* operator new(size_t sz, class Compiler*, genTreeOps oper);
inline GenTree(genTreeOps oper, var_types type DEBUGARG(bool largeNode = false));
};
// Represents a GT_PHI node - a variable sized list of GT_PHI_ARG nodes.
// All PHI_ARG nodes must represent uses of the same local variable and
// the PHI node's type must be the same as the local variable's type.
//
// The PHI node does not represent a definition by itself, it is always
// the RHS of a GT_ASG node. The LHS of the ASG node is always a GT_LCL_VAR
// node, that is a definition for the same local variable referenced by
// all the used PHI_ARG nodes:
//
// ASG(LCL_VAR(lcl7), PHI(PHI_ARG(lcl7), PHI_ARG(lcl7), PHI_ARG(lcl7)))
//
// PHI nodes are also present in LIR, where GT_STORE_LCL_VAR replaces the
// ASG node.
//
// The order of the PHI_ARG uses is not currently relevant and it may be
// the same or not as the order of the predecessor blocks.
//
struct GenTreePhi final : public GenTree
{
class Use
{
GenTree* m_node;
Use* m_next;
public:
Use(GenTree* node, Use* next = nullptr) : m_node(node), m_next(next)
{
assert(node->OperIs(GT_PHI_ARG));
}
GenTree*& NodeRef()
{
return m_node;
}
GenTree* GetNode() const
{
assert(m_node->OperIs(GT_PHI_ARG));
return m_node;
}
void SetNode(GenTree* node)
{
assert(node->OperIs(GT_PHI_ARG));
m_node = node;
}
Use*& NextRef()
{
return m_next;
}
Use* GetNext() const
{
return m_next;
}
};
class UseIterator
{
Use* m_use;
public:
UseIterator(Use* use) : m_use(use)
{
}
Use& operator*() const
{
return *m_use;
}
Use* operator->() const
{
return m_use;
}
UseIterator& operator++()
{
m_use = m_use->GetNext();
return *this;
}
bool operator==(const UseIterator& i) const
{
return m_use == i.m_use;
}
bool operator!=(const UseIterator& i) const
{
return m_use != i.m_use;
}
};
class UseList
{
Use* m_uses;
public:
UseList(Use* uses) : m_uses(uses)
{
}
UseIterator begin() const
{
return UseIterator(m_uses);
}
UseIterator end() const
{
return UseIterator(nullptr);
}
};
Use* gtUses;
GenTreePhi(var_types type) : GenTree(GT_PHI, type), gtUses(nullptr)
{
}
UseList Uses()
{
return UseList(gtUses);
}
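// Illustrative usage sketch (hypothetical "phi" node): iterating a PHI's argument uses via the UseList above:
//
//     for (GenTreePhi::Use& use : phi->Uses())
//     {
//         GenTree* phiArg = use.GetNode(); // always a GT_PHI_ARG
//     }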
//--------------------------------------------------------------------------
// Equals: Checks if 2 PHI nodes are equal.
//
// Arguments:
// phi1 - The first PHI node
// phi2 - The second PHI node
//
// Return Value:
// true if the 2 PHI nodes have the same type, number of uses, and the
// uses are equal.
//
// Notes:
// The order of uses must be the same for equality, even if the
// order is not usually relevant and is not guaranteed to reflect
// a particular order of the predecessor blocks.
//
static bool Equals(GenTreePhi* phi1, GenTreePhi* phi2)
{
if (phi1->TypeGet() != phi2->TypeGet())
{
return false;
}
GenTreePhi::UseIterator i1 = phi1->Uses().begin();
GenTreePhi::UseIterator end1 = phi1->Uses().end();
GenTreePhi::UseIterator i2 = phi2->Uses().begin();
GenTreePhi::UseIterator end2 = phi2->Uses().end();
for (; (i1 != end1) && (i2 != end2); ++i1, ++i2)
{
if (!Compare(i1->GetNode(), i2->GetNode()))
{
return false;
}
}
return (i1 == end1) && (i2 == end2);
}
#if DEBUGGABLE_GENTREE
GenTreePhi() : GenTree()
{
}
#endif
};
// Represents a list of fields constituting a struct, when it is passed as an argument.
//
struct GenTreeFieldList : public GenTree
{
class Use
{
GenTree* m_node;
Use* m_next;
uint16_t m_offset;
var_types m_type;
public:
Use(GenTree* node, unsigned offset, var_types type)
: m_node(node), m_next(nullptr), m_offset(static_cast<uint16_t>(offset)), m_type(type)
{
// We can save space on 32 bit hosts by storing the offset as uint16_t. Struct promotion
// only accepts structs which are much smaller than that - 128 bytes = max 4 fields * max
// SIMD vector size (32 bytes).
assert(offset <= UINT16_MAX);
}
GenTree*& NodeRef()
{
return m_node;
}
GenTree* GetNode() const
{
return m_node;
}
void SetNode(GenTree* node)
{
assert(node != nullptr);
m_node = node;
}
Use*& NextRef()
{
return m_next;
}
Use* GetNext() const
{
return m_next;
}
void SetNext(Use* next)
{
m_next = next;
}
unsigned GetOffset() const
{
return m_offset;
}
var_types GetType() const
{
return m_type;
}
void SetType(var_types type)
{
m_type = type;
}
};
class UseIterator
{
Use* use;
public:
UseIterator(Use* use) : use(use)
{
}
Use& operator*()
{
return *use;
}
Use* operator->()
{
return use;
}
void operator++()
{
use = use->GetNext();
}
bool operator==(const UseIterator& other)
{
return use == other.use;
}
bool operator!=(const UseIterator& other)
{
return use != other.use;
}
};
class UseList
{
Use* m_head;
Use* m_tail;
public:
UseList() : m_head(nullptr), m_tail(nullptr)
{
}
Use* GetHead() const
{
return m_head;
}
UseIterator begin() const
{
return m_head;
}
UseIterator end() const
{
return nullptr;
}
void AddUse(Use* newUse)
{
assert(newUse->GetNext() == nullptr);
if (m_head == nullptr)
{
m_head = newUse;
}
else
{
m_tail->SetNext(newUse);
}
m_tail = newUse;
}
void InsertUse(Use* insertAfter, Use* newUse)
{
assert(newUse->GetNext() == nullptr);
newUse->SetNext(insertAfter->GetNext());
insertAfter->SetNext(newUse);
if (m_tail == insertAfter)
{
m_tail = newUse;
}
}
void Reverse()
{
m_tail = m_head;
m_head = nullptr;
for (Use *next, *use = m_tail; use != nullptr; use = next)
{
next = use->GetNext();
use->SetNext(m_head);
m_head = use;
}
}
bool IsSorted() const
{
unsigned offset = 0;
for (GenTreeFieldList::Use& use : *this)
{
if (use.GetOffset() < offset)
{
return false;
}
offset = use.GetOffset();
}
return true;
}
};
private:
UseList m_uses;
public:
GenTreeFieldList() : GenTree(GT_FIELD_LIST, TYP_STRUCT)
{
SetContained();
}
UseList& Uses()
{
return m_uses;
}
// Add a new field use to the end of the use list and update side effect flags.
void AddField(Compiler* compiler, GenTree* node, unsigned offset, var_types type);
// Add a new field use to the end of the use list without updating side effect flags.
void AddFieldLIR(Compiler* compiler, GenTree* node, unsigned offset, var_types type);
// Insert a new field use after the specified use and update side effect flags.
void InsertField(Compiler* compiler, Use* insertAfter, GenTree* node, unsigned offset, var_types type);
// Insert a new field use after the specified use without updating side effect flags.
void InsertFieldLIR(Compiler* compiler, Use* insertAfter, GenTree* node, unsigned offset, var_types type);
//--------------------------------------------------------------------------
// Equals: Check if 2 FIELD_LIST nodes are equal.
//
// Arguments:
// list1 - The first FIELD_LIST node
// list2 - The second FIELD_LIST node
//
// Return Value:
// true if the 2 FIELD_LIST nodes have the same type, number of uses, and the
// uses are equal.
//
static bool Equals(GenTreeFieldList* list1, GenTreeFieldList* list2)
{
assert(list1->TypeGet() == TYP_STRUCT);
assert(list2->TypeGet() == TYP_STRUCT);
UseIterator i1 = list1->Uses().begin();
UseIterator end1 = list1->Uses().end();
UseIterator i2 = list2->Uses().begin();
UseIterator end2 = list2->Uses().end();
for (; (i1 != end1) && (i2 != end2); ++i1, ++i2)
{
if (!Compare(i1->GetNode(), i2->GetNode()) || (i1->GetOffset() != i2->GetOffset()) ||
(i1->GetType() != i2->GetType()))
{
return false;
}
}
return (i1 == end1) && (i2 == end2);
}
};
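// Illustrative usage sketch (hypothetical operand nodes "loNode"/"hiNode"; assumes the Compiler-provided
// operator new declared on GenTree above): building a two-field list for a struct passed as two INT-sized
// pieces:
//
//     GenTreeFieldList* fieldList = new (comp, GT_FIELD_LIST) GenTreeFieldList();
//     fieldList->AddField(comp, loNode, 0, TYP_INT);
//     fieldList->AddField(comp, hiNode, 4, TYP_INT);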
//------------------------------------------------------------------------
// GenTreeUseEdgeIterator: an iterator that will produce each use edge of a GenTree node in the order in which
// they are used.
//
// Operand iteration is common enough in the back end of the compiler that the implementation of this type has
// traded some simplicity for speed:
// - As much work as is reasonable is done in the constructor rather than during operand iteration
// - Node-specific functionality is handled by a small class of "advance" functions called by operator++
// rather than making operator++ itself handle all nodes
// - Some specialization has been performed for specific node types/shapes (e.g. the advance function for
// binary nodes is specialized based on whether or not the node has the GTF_REVERSE_OPS flag set)
//
// Valid values of this type may be obtained by calling `GenTree::UseEdgesBegin` and `GenTree::UseEdgesEnd`.
//
class GenTreeUseEdgeIterator final
{
friend class GenTreeOperandIterator;
friend GenTreeUseEdgeIterator GenTree::UseEdgesBegin();
friend GenTreeUseEdgeIterator GenTree::UseEdgesEnd();
enum
{
CALL_INSTANCE = 0,
CALL_ARGS = 1,
CALL_LATE_ARGS = 2,
CALL_CONTROL_EXPR = 3,
CALL_COOKIE = 4,
CALL_ADDRESS = 5,
CALL_TERMINAL = 6,
};
typedef void (GenTreeUseEdgeIterator::*AdvanceFn)();
AdvanceFn m_advance;
GenTree* m_node;
GenTree** m_edge;
// Pointer sized state storage, GenTreePhi::Use* or GenTreeCall::Use*
// or the exclusive end/beginning of GenTreeMultiOp's operand array.
void* m_statePtr;
// Integer sized state storage, usually the operand index for non-list based nodes.
int m_state;
GenTreeUseEdgeIterator(GenTree* node);
// Advance functions for special nodes
void AdvanceCmpXchg();
void AdvanceArrElem();
void AdvanceArrOffset();
void AdvanceStoreDynBlk();
void AdvanceFieldList();
void AdvancePhi();
template <bool ReverseOperands>
void AdvanceBinOp();
void SetEntryStateForBinOp();
// The advance function for call nodes
template <int state>
void AdvanceCall();
#if defined(FEATURE_SIMD) || defined(FEATURE_HW_INTRINSICS)
void AdvanceMultiOp();
void AdvanceReversedMultiOp();
void SetEntryStateForMultiOp();
#endif
void Terminate();
public:
GenTreeUseEdgeIterator();
inline GenTree** operator*()
{
assert(m_state != -1);
return m_edge;
}
inline GenTree** operator->()
{
assert(m_state != -1);
return m_edge;
}
inline bool operator==(const GenTreeUseEdgeIterator& other) const
{
if (m_state == -1 || other.m_state == -1)
{
return m_state == other.m_state;
}
return (m_node == other.m_node) && (m_edge == other.m_edge) && (m_statePtr == other.m_statePtr) &&
(m_state == other.m_state);
}
inline bool operator!=(const GenTreeUseEdgeIterator& other) const
{
return !(operator==(other));
}
GenTreeUseEdgeIterator& operator++();
};
//------------------------------------------------------------------------
// GenTreeOperandIterator: an iterator that will produce each operand of a
// GenTree node in the order in which they are
// used. This uses `GenTreeUseEdgeIterator` under
// the covers.
//
// Note: valid values of this type may be obtained by calling
// `GenTree::OperandsBegin` and `GenTree::OperandsEnd`.
class GenTreeOperandIterator final
{
friend GenTreeOperandIterator GenTree::OperandsBegin();
friend GenTreeOperandIterator GenTree::OperandsEnd();
GenTreeUseEdgeIterator m_useEdges;
GenTreeOperandIterator(GenTree* node) : m_useEdges(node)
{
}
public:
GenTreeOperandIterator() : m_useEdges()
{
}
inline GenTree* operator*()
{
return *(*m_useEdges);
}
inline GenTree* operator->()
{
return *(*m_useEdges);
}
inline bool operator==(const GenTreeOperandIterator& other) const
{
return m_useEdges == other.m_useEdges;
}
inline bool operator!=(const GenTreeOperandIterator& other) const
{
return !(operator==(other));
}
inline GenTreeOperandIterator& operator++()
{
++m_useEdges;
return *this;
}
};
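// Illustrative usage sketch (hypothetical "node"): walking a node's operands in execution order via the
// ranges above:
//
//     for (GenTree* operand : node->Operands())
//     {
//         // ... use "operand" ...
//     }
//
// For hot paths, GenTree::VisitOperands (see its comment above) is generally the cheaper option.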
/*****************************************************************************/
// In the current design, we never instantiate GenTreeUnOp: it exists only to be
// used as a base class. For unary operators, we instantiate GenTreeOp, with a NULL second
// argument. We check that this is true dynamically. We could tighten this and get static
// checking, but that would entail accessing the first child of a unary operator via something
// like gtUnOp.gtOp1 instead of AsOp()->gtOp1.
struct GenTreeUnOp : public GenTree
{
GenTree* gtOp1;
protected:
GenTreeUnOp(genTreeOps oper, var_types type DEBUGARG(bool largeNode = false))
: GenTree(oper, type DEBUGARG(largeNode)), gtOp1(nullptr)
{
}
GenTreeUnOp(genTreeOps oper, var_types type, GenTree* op1 DEBUGARG(bool largeNode = false))
: GenTree(oper, type DEBUGARG(largeNode)), gtOp1(op1)
{
assert(op1 != nullptr || NullOp1Legal());
if (op1 != nullptr)
{ // Propagate effects flags from child.
gtFlags |= op1->gtFlags & GTF_ALL_EFFECT;
}
}
#if DEBUGGABLE_GENTREE
GenTreeUnOp() : GenTree(), gtOp1(nullptr)
{
}
#endif
};
struct GenTreeOp : public GenTreeUnOp
{
GenTree* gtOp2;
GenTreeOp(genTreeOps oper, var_types type, GenTree* op1, GenTree* op2 DEBUGARG(bool largeNode = false))
: GenTreeUnOp(oper, type, op1 DEBUGARG(largeNode)), gtOp2(op2)
{
// comparisons are always integral types
assert(!GenTree::OperIsCompare(oper) || varTypeIsIntegral(type));
// Binary operators, with a few exceptions, require a non-nullptr
// second argument.
assert(op2 != nullptr || NullOp2Legal());
// Unary operators, on the other hand, require a null second argument.
assert(!OperIsUnary(oper) || op2 == nullptr);
// Propagate effects flags from child. (UnOp handled this for first child.)
if (op2 != nullptr)
{
gtFlags |= op2->gtFlags & GTF_ALL_EFFECT;
}
}
// A small set of types are unary operators with optional arguments. We use
// this constructor to build those.
GenTreeOp(genTreeOps oper, var_types type DEBUGARG(bool largeNode = false))
: GenTreeUnOp(oper, type DEBUGARG(largeNode)), gtOp2(nullptr)
{
// Unary operators with optional arguments:
assert(oper == GT_NOP || oper == GT_RETURN || oper == GT_RETFILT || OperIsBlk(oper));
}
// returns true if we will use the division by constant optimization for this node.
bool UsesDivideByConstOptimized(Compiler* comp);
// checks if we will use the division by constant optimization this node
// then sets the flag GTF_DIV_BY_CNS_OPT and GTF_DONT_CSE on the constant
void CheckDivideByConstOptimized(Compiler* comp);
// True if this node is marked as using the division by constant optimization
bool MarkedDivideByConstOptimized() const
{
return (gtFlags & GTF_DIV_BY_CNS_OPT) != 0;
}
#if !defined(TARGET_64BIT) || defined(TARGET_ARM64)
bool IsValidLongMul();
#endif
#if !defined(TARGET_64BIT) && defined(DEBUG)
void DebugCheckLongMul();
#endif
#if DEBUGGABLE_GENTREE
GenTreeOp() : GenTreeUnOp(), gtOp2(nullptr)
{
}
#endif
// True if this relop is marked for a transform during the emitter
// phase, e.g., jge => jns
bool MarkedForSignJumpOpt() const
{
return (gtFlags & GTF_RELOP_SJUMP_OPT) != 0;
}
};
struct GenTreeVal : public GenTree
{
size_t gtVal1;
GenTreeVal(genTreeOps oper, var_types type, ssize_t val) : GenTree(oper, type), gtVal1(val)
{
}
#if DEBUGGABLE_GENTREE
GenTreeVal() : GenTree()
{
}
#endif
};
struct GenTreeIntConCommon : public GenTree
{
inline INT64 LngValue() const;
inline void SetLngValue(INT64 val);
inline ssize_t IconValue() const;
inline void SetIconValue(ssize_t val);
inline INT64 IntegralValue() const;
inline void SetIntegralValue(int64_t value);
template <typename T>
inline void SetValueTruncating(T value);
GenTreeIntConCommon(genTreeOps oper, var_types type DEBUGARG(bool largeNode = false))
: GenTree(oper, type DEBUGARG(largeNode))
{
}
bool FitsInI8() // IconValue() fits into 8-bit signed storage
{
return FitsInI8(IconValue());
}
static bool FitsInI8(ssize_t val) // Constant fits into 8-bit signed storage
{
return (int8_t)val == val;
}
bool FitsInI32() // IconValue() fits into 32-bit signed storage
{
return FitsInI32(IconValue());
}
static bool FitsInI32(ssize_t val) // Constant fits into 32-bit signed storage
{
#ifdef TARGET_64BIT
return (int32_t)val == val;
#else
return true;
#endif
}
bool ImmedValNeedsReloc(Compiler* comp);
bool ImmedValCanBeFolded(Compiler* comp, genTreeOps op);
#ifdef TARGET_XARCH
bool FitsInAddrBase(Compiler* comp);
bool AddrNeedsReloc(Compiler* comp);
#endif
#if DEBUGGABLE_GENTREE
GenTreeIntConCommon() : GenTree()
{
}
#endif
};
// node representing a read from a physical register
struct GenTreePhysReg : public GenTree
{
// physregs need a field beyond GetRegNum() because
// GetRegNum() indicates the destination (and can be changed)
// whereas gtSrcReg indicates the source
regNumber gtSrcReg;
GenTreePhysReg(regNumber r, var_types type = TYP_I_IMPL) : GenTree(GT_PHYSREG, type), gtSrcReg(r)
{
}
#if DEBUGGABLE_GENTREE
GenTreePhysReg() : GenTree()
{
}
#endif
};
/* gtIntCon -- integer constant (GT_CNS_INT) */
struct GenTreeIntCon : public GenTreeIntConCommon
{
/*
* This is the GT_CNS_INT struct definition.
* It's used to hold both int constants and pointer handle constants.
* For the 64-bit targets we will only use GT_CNS_INT, as it can represent all the possible sizes.
* For the 32-bit targets we use a GT_CNS_LNG to hold a 64-bit integer constant and GT_CNS_INT for all others.
* In the future when we retarget the JIT for x86 we should consider eliminating GT_CNS_LNG
*/
ssize_t gtIconVal; // Must overlap and have the same offset with the gtIconVal field in GenTreeLngCon below.
/* The InitializeArray intrinsic needs to go back to the newarray statement
to find the class handle of the array so that we can get its size. However,
in ngen mode, the handle in that statement does not correspond to the compile
time handle (rather it lets you get a handle at run-time). In that case, we also
need to store a compile time handle, which goes in this gtCompileTimeHandle field.
*/
ssize_t gtCompileTimeHandle;
// TODO-Cleanup: It's not clear what characterizes the cases where the field
// above is used. It may be that its uses and those of the "gtFieldSeq" field below
// are mutually exclusive, and they could be put in a union. Or else we should separate
// this type into three subtypes.
// If this constant represents the offset of one or more fields, "gtFieldSeq" represents that
// sequence of fields.
FieldSeqNode* gtFieldSeq;
#ifdef DEBUG
// If the value represents target address, holds the method handle to that target which is used
// to fetch target method name and display in the disassembled code.
size_t gtTargetHandle = 0;
#endif
GenTreeIntCon(var_types type, ssize_t value DEBUGARG(bool largeNode = false))
: GenTreeIntConCommon(GT_CNS_INT, type DEBUGARG(largeNode))
, gtIconVal(value)
, gtCompileTimeHandle(0)
, gtFieldSeq(FieldSeqStore::NotAField())
{
}
GenTreeIntCon(var_types type, ssize_t value, FieldSeqNode* fields DEBUGARG(bool largeNode = false))
: GenTreeIntConCommon(GT_CNS_INT, type DEBUGARG(largeNode))
, gtIconVal(value)
, gtCompileTimeHandle(0)
, gtFieldSeq(fields)
{
assert(fields != nullptr);
}
void FixupInitBlkValue(var_types asgType);
#if DEBUGGABLE_GENTREE
GenTreeIntCon() : GenTreeIntConCommon()
{
}
#endif
};
/* gtLngCon -- long constant (GT_CNS_LNG) */
struct GenTreeLngCon : public GenTreeIntConCommon
{
INT64 gtLconVal; // Must overlap and have the same offset with the gtIconVal field in GenTreeIntCon above.
INT32 LoVal()
{
return (INT32)(gtLconVal & 0xffffffff);
}
INT32 HiVal()
{
return (INT32)(gtLconVal >> 32);
}
GenTreeLngCon(INT64 val) : GenTreeIntConCommon(GT_CNS_NATIVELONG, TYP_LONG)
{
SetLngValue(val);
}
#if DEBUGGABLE_GENTREE
GenTreeLngCon() : GenTreeIntConCommon()
{
}
#endif
};
inline INT64 GenTreeIntConCommon::LngValue() const
{
#ifndef TARGET_64BIT
assert(gtOper == GT_CNS_LNG);
return AsLngCon()->gtLconVal;
#else
return IconValue();
#endif
}
inline void GenTreeIntConCommon::SetLngValue(INT64 val)
{
#ifndef TARGET_64BIT
assert(gtOper == GT_CNS_LNG);
AsLngCon()->gtLconVal = val;
#else
// Compile time asserts that these two fields overlap and have the same offsets: gtIconVal and gtLconVal
C_ASSERT(offsetof(GenTreeLngCon, gtLconVal) == offsetof(GenTreeIntCon, gtIconVal));
C_ASSERT(sizeof(AsLngCon()->gtLconVal) == sizeof(AsIntCon()->gtIconVal));
SetIconValue(ssize_t(val));
#endif
}
inline ssize_t GenTreeIntConCommon::IconValue() const
{
assert(gtOper == GT_CNS_INT); // We should never see a GT_CNS_LNG for a 64-bit target!
return AsIntCon()->gtIconVal;
}
inline void GenTreeIntConCommon::SetIconValue(ssize_t val)
{
assert(gtOper == GT_CNS_INT); // We should never see a GT_CNS_LNG for a 64-bit target!
AsIntCon()->gtIconVal = val;
}
inline INT64 GenTreeIntConCommon::IntegralValue() const
{
#ifdef TARGET_64BIT
return LngValue();
#else
return gtOper == GT_CNS_LNG ? LngValue() : (INT64)IconValue();
#endif // TARGET_64BIT
}
inline void GenTreeIntConCommon::SetIntegralValue(int64_t value)
{
#ifdef TARGET_64BIT
SetIconValue(value);
#else
if (OperIs(GT_CNS_LNG))
{
SetLngValue(value);
}
else
{
assert(FitsIn<int32_t>(value));
SetIconValue(static_cast<int32_t>(value));
}
#endif // TARGET_64BIT
}
//------------------------------------------------------------------------
// SetValueTruncating: Set the value, truncating to TYP_INT if necessary.
//
// The function will truncate the supplied value to a 32 bit signed
// integer if the node's type is not TYP_LONG, otherwise setting it
// as-is. Note that this function intentionally does not check for
// small types (such nodes are created in lowering) for TP reasons.
//
// This function is intended to be used where its truncating behavior is
// desirable. One example is folding of ADD(CNS_INT, CNS_INT) performed in
// wider integers, which is typical when compiling on 64 bit hosts, as
// most arithmetic is done in ssize_t's aka int64_t's in that case, while
// the node itself can be of a narrower type.
//
// Arguments:
// value - Value to set, truncating to TYP_INT if the node is not of TYP_LONG
//
// Notes:
// This function is templated so that it works well with compiler warnings of
// the form "Operation may overflow before being assigned to a wider type", in
// case "value" is of type ssize_t, which is common.
//
template <typename T>
inline void GenTreeIntConCommon::SetValueTruncating(T value)
{
static_assert_no_msg((std::is_same<T, int32_t>::value || std::is_same<T, int64_t>::value));
if (TypeIs(TYP_LONG))
{
SetLngValue(value);
}
else
{
SetIconValue(static_cast<int32_t>(value));
}
}
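// An illustrative sketch (hypothetical 'cns' variable) of the intended use: constant folding
// performed in a wider integer on a 64-bit host.
//
//   ssize_t folded = cns->IconValue() * 2;  // intermediate math done in ssize_t
//   cns->SetValueTruncating(folded);        // stores all bits for TYP_LONG, truncates to int32 otherwise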
/* gtDblCon -- double constant (GT_CNS_DBL) */
struct GenTreeDblCon : public GenTree
{
double gtDconVal;
bool isBitwiseEqual(GenTreeDblCon* other)
{
unsigned __int64 bits = *(unsigned __int64*)(&gtDconVal);
unsigned __int64 otherBits = *(unsigned __int64*)(&(other->gtDconVal));
return (bits == otherBits);
}
GenTreeDblCon(double val, var_types type = TYP_DOUBLE) : GenTree(GT_CNS_DBL, type), gtDconVal(val)
{
assert(varTypeIsFloating(type));
}
#if DEBUGGABLE_GENTREE
GenTreeDblCon() : GenTree()
{
}
#endif
};
/* gtStrCon -- string constant (GT_CNS_STR) */
#define EMPTY_STRING_SCON (unsigned)-1
struct GenTreeStrCon : public GenTree
{
unsigned gtSconCPX;
CORINFO_MODULE_HANDLE gtScpHnd;
// Returns true if this GT_CNS_STR was imported for String.Empty field
bool IsStringEmptyField()
{
return gtSconCPX == EMPTY_STRING_SCON && gtScpHnd == nullptr;
}
// Because this node can come from an inlined method we need to
// have the scope handle, since it will become a helper call.
GenTreeStrCon(unsigned sconCPX, CORINFO_MODULE_HANDLE mod DEBUGARG(bool largeNode = false))
: GenTree(GT_CNS_STR, TYP_REF DEBUGARG(largeNode)), gtSconCPX(sconCPX), gtScpHnd(mod)
{
}
#if DEBUGGABLE_GENTREE
GenTreeStrCon() : GenTree()
{
}
#endif
};
// Common supertype of LCL_VAR, LCL_FLD, REG_VAR, PHI_ARG
// This inherits from UnOp because lclvar stores are Unops
struct GenTreeLclVarCommon : public GenTreeUnOp
{
private:
unsigned _gtLclNum; // The local number. An index into the Compiler::lvaTable array.
unsigned _gtSsaNum; // The SSA number.
public:
GenTreeLclVarCommon(genTreeOps oper, var_types type, unsigned lclNum DEBUGARG(bool largeNode = false))
: GenTreeUnOp(oper, type DEBUGARG(largeNode))
{
SetLclNum(lclNum);
}
unsigned GetLclNum() const
{
return _gtLclNum;
}
void SetLclNum(unsigned lclNum)
{
_gtLclNum = lclNum;
_gtSsaNum = SsaConfig::RESERVED_SSA_NUM;
}
uint16_t GetLclOffs() const;
unsigned GetSsaNum() const
{
return _gtSsaNum;
}
void SetSsaNum(unsigned ssaNum)
{
_gtSsaNum = ssaNum;
}
bool HasSsaName()
{
return (GetSsaNum() != SsaConfig::RESERVED_SSA_NUM);
}
#if DEBUGGABLE_GENTREE
GenTreeLclVarCommon() : GenTreeUnOp()
{
}
#endif
};
//------------------------------------------------------------------------
// MultiRegSpillFlags
//
// GTF_SPILL or GTF_SPILLED flag on a multi-reg node indicates that one or
// more of its result regs are in that state. The spill flags of each register
// are stored here. We only need 2 bits per returned register,
// so this is treated as a 2-bit array. No architecture needs more than 8 bits.
//
typedef unsigned char MultiRegSpillFlags;
static const unsigned PACKED_GTF_SPILL = 1;
static const unsigned PACKED_GTF_SPILLED = 2;
//----------------------------------------------------------------------
// GetMultiRegSpillFlagsByIdx: get spill flag associated with the return register
// specified by its index.
//
// Arguments:
// idx - Position or index of the return register
//
// Return Value:
// Returns GTF_* flags associated with the register. Only GTF_SPILL and GTF_SPILLED are considered.
//
inline GenTreeFlags GetMultiRegSpillFlagsByIdx(MultiRegSpillFlags flags, unsigned idx)
{
static_assert_no_msg(MAX_RET_REG_COUNT * 2 <= sizeof(unsigned char) * BITS_PER_BYTE);
assert(idx < MAX_RET_REG_COUNT);
unsigned bits = flags >> (idx * 2); // It doesn't matter that we possibly leave other high bits here.
GenTreeFlags spillFlags = GTF_EMPTY;
if (bits & PACKED_GTF_SPILL)
{
spillFlags |= GTF_SPILL;
}
if (bits & PACKED_GTF_SPILLED)
{
spillFlags |= GTF_SPILLED;
}
return spillFlags;
}
//----------------------------------------------------------------------
// SetMultiRegSpillFlagsByIdx: set spill flags for the register specified by its index.
//
// Arguments:
// oldFlags - The current value of the MultiRegSpillFlags for a node.
// flagsToSet - GTF_* flags. Only GTF_SPILL and GTF_SPILLED are allowed.
// Note that these are the flags used on non-multireg nodes,
// and this method adds the appropriate flags to the
// incoming MultiRegSpillFlags and returns it.
// idx - Position or index of the register
//
// Return Value:
// The new value for the node's MultiRegSpillFlags.
//
inline MultiRegSpillFlags SetMultiRegSpillFlagsByIdx(MultiRegSpillFlags oldFlags, GenTreeFlags flagsToSet, unsigned idx)
{
static_assert_no_msg(MAX_RET_REG_COUNT * 2 <= sizeof(unsigned char) * BITS_PER_BYTE);
assert(idx < MAX_RET_REG_COUNT);
MultiRegSpillFlags newFlags = oldFlags;
unsigned bits = 0;
if (flagsToSet & GTF_SPILL)
{
bits |= PACKED_GTF_SPILL;
}
if (flagsToSet & GTF_SPILLED)
{
bits |= PACKED_GTF_SPILLED;
}
const unsigned char packedFlags = PACKED_GTF_SPILL | PACKED_GTF_SPILLED;
// Clear anything that was already there by masking out the bits before 'or'ing in what we want there.
newFlags = (unsigned char)((newFlags & ~(packedFlags << (idx * 2))) | (bits << (idx * 2)));
return newFlags;
}
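// An illustrative sketch of how the 2-bit packing above behaves (hypothetical values):
//
//   MultiRegSpillFlags packed = 0;
//   packed = SetMultiRegSpillFlagsByIdx(packed, GTF_SPILL, 1); // sets PACKED_GTF_SPILL in bits [3:2]
//   GetMultiRegSpillFlagsByIdx(packed, 1);                     // yields GTF_SPILL
//   GetMultiRegSpillFlagsByIdx(packed, 0);                     // yields GTF_EMPTY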
// gtLclVar -- load/store/addr of local variable
struct GenTreeLclVar : public GenTreeLclVarCommon
{
private:
regNumberSmall gtOtherReg[MAX_MULTIREG_COUNT - 1];
MultiRegSpillFlags gtSpillFlags;
public:
INDEBUG(IL_OFFSET gtLclILoffs;) // instr offset of ref (only for JIT dumps)
// Multireg support
bool IsMultiReg() const
{
return ((gtFlags & GTF_VAR_MULTIREG) != 0);
}
void ClearMultiReg()
{
gtFlags &= ~GTF_VAR_MULTIREG;
}
void SetMultiReg()
{
gtFlags |= GTF_VAR_MULTIREG;
ClearOtherRegFlags();
}
regNumber GetRegNumByIdx(int regIndex) const
{
assert(regIndex < MAX_MULTIREG_COUNT);
return (regIndex == 0) ? GetRegNum() : (regNumber)gtOtherReg[regIndex - 1];
}
void SetRegNumByIdx(regNumber reg, int regIndex)
{
assert(regIndex < MAX_MULTIREG_COUNT);
if (regIndex == 0)
{
SetRegNum(reg);
}
else
{
gtOtherReg[regIndex - 1] = regNumberSmall(reg);
}
}
GenTreeFlags GetRegSpillFlagByIdx(unsigned idx) const
{
return GetMultiRegSpillFlagsByIdx(gtSpillFlags, idx);
}
void SetRegSpillFlagByIdx(GenTreeFlags flags, unsigned idx)
{
gtSpillFlags = SetMultiRegSpillFlagsByIdx(gtSpillFlags, flags, idx);
}
unsigned int GetFieldCount(Compiler* compiler) const;
var_types GetFieldTypeByIndex(Compiler* compiler, unsigned idx);
//-------------------------------------------------------------------
// ClearOtherRegFlags: clear GTF_* flags associated with gtOtherRegs
//
// Arguments:
// None
//
// Return Value:
// None
void ClearOtherRegFlags()
{
gtSpillFlags = 0;
}
//-------------------------------------------------------------------------
// CopyOtherRegFlags: copy GTF_* flags associated with gtOtherRegs from
// the given LclVar node.
//
// Arguments:
// fromCall - GenTreeLclVar node from which to copy
//
// Return Value:
// None
//
void CopyOtherRegFlags(GenTreeLclVar* from)
{
this->gtSpillFlags = from->gtSpillFlags;
}
GenTreeLclVar(genTreeOps oper,
var_types type,
unsigned lclNum DEBUGARG(IL_OFFSET ilOffs = BAD_IL_OFFSET) DEBUGARG(bool largeNode = false))
: GenTreeLclVarCommon(oper, type, lclNum DEBUGARG(largeNode)) DEBUGARG(gtLclILoffs(ilOffs))
{
assert(OperIsLocal(oper) || OperIsLocalAddr(oper));
}
#if DEBUGGABLE_GENTREE
GenTreeLclVar() : GenTreeLclVarCommon()
{
}
#endif
};
// gtLclFld -- load/store/addr of local variable field
struct GenTreeLclFld : public GenTreeLclVarCommon
{
private:
uint16_t m_lclOffs; // offset into the variable to access
FieldSeqNode* m_fieldSeq; // This LclFld node represents some sequences of accesses.
public:
GenTreeLclFld(genTreeOps oper, var_types type, unsigned lclNum, unsigned lclOffs)
: GenTreeLclVarCommon(oper, type, lclNum), m_lclOffs(static_cast<uint16_t>(lclOffs)), m_fieldSeq(nullptr)
{
assert(lclOffs <= UINT16_MAX);
}
uint16_t GetLclOffs() const
{
return m_lclOffs;
}
void SetLclOffs(unsigned lclOffs)
{
assert(lclOffs <= UINT16_MAX);
m_lclOffs = static_cast<uint16_t>(lclOffs);
}
FieldSeqNode* GetFieldSeq() const
{
return m_fieldSeq;
}
void SetFieldSeq(FieldSeqNode* fieldSeq)
{
m_fieldSeq = fieldSeq;
}
#ifdef TARGET_ARM
bool IsOffsetMisaligned() const;
#endif // TARGET_ARM
#if DEBUGGABLE_GENTREE
GenTreeLclFld() : GenTreeLclVarCommon()
{
}
#endif
};
/* gtCast -- conversion to a different type (GT_CAST) */
struct GenTreeCast : public GenTreeOp
{
GenTree*& CastOp()
{
return gtOp1;
}
var_types gtCastType;
GenTreeCast(var_types type, GenTree* op, bool fromUnsigned, var_types castType DEBUGARG(bool largeNode = false))
: GenTreeOp(GT_CAST, type, op, nullptr DEBUGARG(largeNode)), gtCastType(castType)
{
// We do not allow casts from floating point types to be treated as from
// unsigned to avoid bugs related to wrong GTF_UNSIGNED in case the
// CastOp's type changes.
assert(!varTypeIsFloating(op) || !fromUnsigned);
gtFlags |= fromUnsigned ? GTF_UNSIGNED : GTF_EMPTY;
}
#if DEBUGGABLE_GENTREE
GenTreeCast() : GenTreeOp()
{
}
#endif
};
// GT_BOX nodes are place markers for boxed values. The "real" tree
// for most purposes is in gtBoxOp.
struct GenTreeBox : public GenTreeUnOp
{
// An expanded helper call to implement the "box" if we don't get
// rid of it any other way. Must be in same position as op1.
GenTree*& BoxOp()
{
return gtOp1;
}
// This is the statement that contains the assignment tree when the node is an inlined GT_BOX on a value
// type
Statement* gtAsgStmtWhenInlinedBoxValue;
// And this is the statement that copies from the value being boxed to the box payload
Statement* gtCopyStmtWhenInlinedBoxValue;
GenTreeBox(var_types type,
GenTree* boxOp,
Statement* asgStmtWhenInlinedBoxValue,
Statement* copyStmtWhenInlinedBoxValue)
: GenTreeUnOp(GT_BOX, type, boxOp)
, gtAsgStmtWhenInlinedBoxValue(asgStmtWhenInlinedBoxValue)
, gtCopyStmtWhenInlinedBoxValue(copyStmtWhenInlinedBoxValue)
{
}
#if DEBUGGABLE_GENTREE
GenTreeBox() : GenTreeUnOp()
{
}
#endif
};
// GenTreeField -- data member ref (GT_FIELD)
struct GenTreeField : public GenTreeUnOp
{
CORINFO_FIELD_HANDLE gtFldHnd;
DWORD gtFldOffset;
bool gtFldMayOverlap;
#ifdef FEATURE_READYTORUN
CORINFO_CONST_LOOKUP gtFieldLookup;
#endif
GenTreeField(var_types type, GenTree* obj, CORINFO_FIELD_HANDLE fldHnd, DWORD offs)
: GenTreeUnOp(GT_FIELD, type, obj), gtFldHnd(fldHnd), gtFldOffset(offs), gtFldMayOverlap(false)
{
#ifdef FEATURE_READYTORUN
gtFieldLookup.addr = nullptr;
#endif
}
#if DEBUGGABLE_GENTREE
GenTreeField() : GenTreeUnOp()
{
}
#endif
// The object this field belongs to. Will be "nullptr" for static fields.
// Note that this is an address, i.e. for struct fields it will be ADDR(STRUCT).
GenTree* GetFldObj() const
{
return gtOp1;
}
// True if this field is a volatile memory operation.
bool IsVolatile() const
{
return (gtFlags & GTF_FLD_VOLATILE) != 0;
}
};
// There was quite a bit of confusion in the code base about which of gtOp1 and gtOp2 was the
// 'then' and 'else' clause of a colon node. Adding these accessors, while not enforcing anything,
// at least *allows* the programmer to be obviously correct.
// However, these conventions seem backward.
// TODO-Cleanup: If we could get these accessors used everywhere, then we could switch them.
struct GenTreeColon : public GenTreeOp
{
GenTree*& ThenNode()
{
return gtOp2;
}
GenTree*& ElseNode()
{
return gtOp1;
}
#if DEBUGGABLE_GENTREE
GenTreeColon() : GenTreeOp()
{
}
#endif
GenTreeColon(var_types typ, GenTree* thenNode, GenTree* elseNode) : GenTreeOp(GT_COLON, typ, elseNode, thenNode)
{
}
};
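// An illustrative sketch of the resulting operand layout (hypothetical 'colon' node): given
// GenTreeColon(typ, thenNode, elseNode), the constructor stores elseNode in gtOp1 and thenNode
// in gtOp2, which is exactly what the accessors above encode:
//
//   colon->ThenNode(); // gtOp2, the 'then' expression
//   colon->ElseNode(); // gtOp1, the 'else' expression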
// gtCall -- method call (GT_CALL)
enum class InlineObservation;
//------------------------------------------------------------------------
// GenTreeCallFlags: a bitmask of flags for GenTreeCall stored in gtCallMoreFlags.
//
// clang-format off
enum GenTreeCallFlags : unsigned int
{
GTF_CALL_M_EMPTY = 0,
GTF_CALL_M_EXPLICIT_TAILCALL = 0x00000001, // the call is "tail" prefixed and importer has performed tail call checks
GTF_CALL_M_TAILCALL = 0x00000002, // the call is a tailcall
GTF_CALL_M_VARARGS = 0x00000004, // the call uses varargs ABI
GTF_CALL_M_RETBUFFARG = 0x00000008, // call has a return buffer argument
GTF_CALL_M_DELEGATE_INV = 0x00000010, // call to Delegate.Invoke
GTF_CALL_M_NOGCCHECK = 0x00000020, // not a call for computing full interruptibility and therefore no GC check is required.
GTF_CALL_M_SPECIAL_INTRINSIC = 0x00000040, // function that could be optimized as an intrinsic
// in special cases. Used to optimize fast way out in morphing
GTF_CALL_M_UNMGD_THISCALL = 0x00000080, // "this" pointer (first argument) should be enregistered (only for GTF_CALL_UNMANAGED)
GTF_CALL_M_VIRTSTUB_REL_INDIRECT = 0x00000080, // the virtstub is indirected through a relative address (only for GTF_CALL_VIRT_STUB)
GTF_CALL_M_NONVIRT_SAME_THIS = 0x00000080, // callee "this" pointer is equal to caller this pointer (only for GTF_CALL_NONVIRT)
GTF_CALL_M_FRAME_VAR_DEATH = 0x00000100, // the compLvFrameListRoot variable dies here (last use)
GTF_CALL_M_TAILCALL_VIA_JIT_HELPER = 0x00000200, // call is a tail call dispatched via tail call JIT helper.
#if FEATURE_TAILCALL_OPT
GTF_CALL_M_IMPLICIT_TAILCALL = 0x00000400, // call is an opportunistic tail call and importer has performed tail call checks
GTF_CALL_M_TAILCALL_TO_LOOP = 0x00000800, // call is a fast recursive tail call that can be converted into a loop
#endif
GTF_CALL_M_PINVOKE = 0x00001000, // call is a pinvoke. This mirrors VM flag CORINFO_FLG_PINVOKE.
// A call marked as Pinvoke is not necessarily a GT_CALL_UNMANAGED. For example,
// an IL Stub dynamically generated for a PInvoke declaration is flagged as
// a Pinvoke but not as an unmanaged call. See impCheckForPInvokeCall() to
// know when these flags are set.
GTF_CALL_M_R2R_REL_INDIRECT = 0x00002000, // ready to run call is indirected through a relative address
GTF_CALL_M_DOES_NOT_RETURN = 0x00004000, // call does not return
GTF_CALL_M_WRAPPER_DELEGATE_INV = 0x00008000, // call is in wrapper delegate
GTF_CALL_M_FAT_POINTER_CHECK = 0x00010000, // CoreRT managed calli needs a transformation that checks a
// special bit in the calli address. If it is set, then it is necessary
// to restore the real function address and load the hidden argument
// as the first argument for the calli. It is the CoreRT replacement for instantiating
// stubs, because executable code cannot be generated at runtime.
GTF_CALL_M_HELPER_SPECIAL_DCE = 0x00020000, // this helper call can be removed if it is part of a comma and
// the comma result is unused.
GTF_CALL_M_DEVIRTUALIZED = 0x00040000, // this call was devirtualized
GTF_CALL_M_UNBOXED = 0x00080000, // this call was optimized to use the unboxed entry point
GTF_CALL_M_GUARDED_DEVIRT = 0x00100000, // this call is a candidate for guarded devirtualization
GTF_CALL_M_GUARDED_DEVIRT_CHAIN = 0x00200000, // this call is a candidate for chained guarded devirtualization
GTF_CALL_M_GUARDED = 0x00400000, // this call was transformed by guarded devirtualization
GTF_CALL_M_ALLOC_SIDE_EFFECTS = 0x00800000, // this is a call to an allocator with side effects
GTF_CALL_M_SUPPRESS_GC_TRANSITION = 0x01000000, // suppress the GC transition (i.e. during a pinvoke) but a separate GC safe point is required.
GTF_CALL_M_EXP_RUNTIME_LOOKUP = 0x02000000, // this call needs to be transformed into CFG for the dynamic dictionary expansion feature.
GTF_CALL_M_STRESS_TAILCALL = 0x04000000, // the call is NOT "tail" prefixed but GTF_CALL_M_EXPLICIT_TAILCALL was added because of tail call stress mode
GTF_CALL_M_EXPANDED_EARLY = 0x08000000, // the Virtual Call target address is expanded and placed in gtControlExpr in Morph rather than in Lower
GTF_CALL_M_LATE_DEVIRT = 0x10000000, // this call has late devirtualization info
};
inline constexpr GenTreeCallFlags operator ~(GenTreeCallFlags a)
{
return (GenTreeCallFlags)(~(unsigned int)a);
}
inline constexpr GenTreeCallFlags operator |(GenTreeCallFlags a, GenTreeCallFlags b)
{
return (GenTreeCallFlags)((unsigned int)a | (unsigned int)b);
}
inline constexpr GenTreeCallFlags operator &(GenTreeCallFlags a, GenTreeCallFlags b)
{
return (GenTreeCallFlags)((unsigned int)a & (unsigned int)b);
}
inline GenTreeCallFlags& operator |=(GenTreeCallFlags& a, GenTreeCallFlags b)
{
return a = (GenTreeCallFlags)((unsigned int)a | (unsigned int)b);
}
inline GenTreeCallFlags& operator &=(GenTreeCallFlags& a, GenTreeCallFlags b)
{
return a = (GenTreeCallFlags)((unsigned int)a & (unsigned int)b);
}
// clang-format on
// Return type descriptor of a GT_CALL node.
// x64 Unix, Arm64, Arm32 and x86 allow a value to be returned in multiple
// registers. For such calls this struct provides the following info
// on their return type
// - type of value returned in each return register
// - ABI return register numbers in which the value is returned
// - count of return registers in which the value is returned
//
// TODO-ARM: Update this to meet the needs of Arm64 and Arm32
//
// TODO-AllArch: Right now it is used for describing multi-reg returned types.
// Eventually we would want to use it for describing even single-reg
// returned types (e.g. structs returned in single register x64/arm).
// This would allow us not to lie or normalize single struct return
// values in importer/morph.
struct ReturnTypeDesc
{
private:
var_types m_regType[MAX_RET_REG_COUNT];
bool m_isEnclosingType;
#ifdef DEBUG
bool m_inited;
#endif
public:
ReturnTypeDesc()
{
Reset();
}
// Initialize the Return Type Descriptor for a method that returns a struct type
void InitializeStructReturnType(Compiler* comp, CORINFO_CLASS_HANDLE retClsHnd, CorInfoCallConvExtension callConv);
// Initialize the Return Type Descriptor for a method that returns a TYP_LONG
// Only needed for X86 and arm32.
void InitializeLongReturnType();
// Reset type descriptor to defaults
void Reset()
{
for (unsigned i = 0; i < MAX_RET_REG_COUNT; ++i)
{
m_regType[i] = TYP_UNKNOWN;
}
m_isEnclosingType = false;
#ifdef DEBUG
m_inited = false;
#endif
}
#ifdef DEBUG
// NOTE: we only use this function when writing out IR dumps. These dumps may take place before the ReturnTypeDesc
// has been initialized.
unsigned TryGetReturnRegCount() const
{
return m_inited ? GetReturnRegCount() : 0;
}
#endif // DEBUG
//--------------------------------------------------------------------------------------------
// GetReturnRegCount: Get the count of return registers in which the return value is returned.
//
// Arguments:
// None
//
// Return Value:
// Count of return registers.
// Returns 0 if the return type is not returned in registers.
unsigned GetReturnRegCount() const
{
assert(m_inited);
int regCount = 0;
for (unsigned i = 0; i < MAX_RET_REG_COUNT; ++i)
{
if (m_regType[i] == TYP_UNKNOWN)
{
break;
}
// otherwise
regCount++;
}
#ifdef DEBUG
// Any remaining elements in m_regType[] should also be TYP_UNKNOWN
for (unsigned i = regCount + 1; i < MAX_RET_REG_COUNT; ++i)
{
assert(m_regType[i] == TYP_UNKNOWN);
}
#endif
return regCount;
}
//-----------------------------------------------------------------------
// IsMultiRegRetType: check whether the type is returned in multiple
// return registers.
//
// Arguments:
// None
//
// Return Value:
// Returns true if the type is returned in multiple return registers.
// False otherwise.
// Note that we only have to examine the first two values to determine this
//
bool IsMultiRegRetType() const
{
if (MAX_RET_REG_COUNT < 2)
{
return false;
}
else
{
assert(m_inited);
return ((m_regType[0] != TYP_UNKNOWN) && (m_regType[1] != TYP_UNKNOWN));
}
}
//--------------------------------------------------------------------------
// GetReturnRegType: Get var_type of the return register specified by index.
//
// Arguments:
// index - Index of the return register.
// First return register will have an index 0 and so on.
//
// Return Value:
// var_type of the return register specified by its index.
// asserts if the index does not have a valid register return type.
var_types GetReturnRegType(unsigned index) const
{
var_types result = m_regType[index];
assert(result != TYP_UNKNOWN);
return result;
}
// True if this value is returned in integer register
// that is larger than the type itself.
bool IsEnclosingType() const
{
return m_isEnclosingType;
}
// Get i'th ABI return register
regNumber GetABIReturnReg(unsigned idx) const;
// Get reg mask of ABI return registers
regMaskTP GetABIReturnRegs() const;
};
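// An illustrative sketch (hypothetical 'retDesc' pointer) of the typical query sequence once
// the descriptor has been initialized, assuming FEATURE_MULTIREG_RET:
//
//   unsigned regCount = retDesc->GetReturnRegCount();
//   for (unsigned i = 0; i < regCount; i++)
//   {
//       var_types regType = retDesc->GetReturnRegType(i);
//       regNumber reg     = retDesc->GetABIReturnReg(i);
//       // ... handle each piece of the (possibly multi-reg) return value
//   }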
class TailCallSiteInfo
{
bool m_isCallvirt : 1;
bool m_isCalli : 1;
CORINFO_SIG_INFO m_sig;
CORINFO_RESOLVED_TOKEN m_token;
public:
// Is the tailcall a callvirt instruction?
bool IsCallvirt()
{
return m_isCallvirt;
}
// Is the tailcall a calli instruction?
bool IsCalli()
{
return m_isCalli;
}
// Get the token of the callee
CORINFO_RESOLVED_TOKEN* GetToken()
{
assert(!IsCalli());
return &m_token;
}
// Get the signature of the callee
CORINFO_SIG_INFO* GetSig()
{
return &m_sig;
}
// Mark the tailcall as a calli with the given signature
void SetCalli(CORINFO_SIG_INFO* sig)
{
m_isCallvirt = false;
m_isCalli = true;
m_sig = *sig;
}
// Mark the tailcall as a callvirt with the given signature and token
void SetCallvirt(CORINFO_SIG_INFO* sig, CORINFO_RESOLVED_TOKEN* token)
{
m_isCallvirt = true;
m_isCalli = false;
m_sig = *sig;
m_token = *token;
}
// Mark the tailcall as a call with the given signature and token
void SetCall(CORINFO_SIG_INFO* sig, CORINFO_RESOLVED_TOKEN* token)
{
m_isCallvirt = false;
m_isCalli = false;
m_sig = *sig;
m_token = *token;
}
};
class fgArgInfo;
enum class NonStandardArgKind : unsigned
{
None,
PInvokeFrame,
PInvokeTarget,
PInvokeCookie,
WrapperDelegateCell,
ShiftLow,
ShiftHigh,
FixedRetBuffer,
VirtualStubCell,
R2RIndirectionCell,
ValidateIndirectCallTarget,
// If changing this enum also change getNonStandardArgKindName and isNonStandardArgAddedLate in fgArgInfo
};
#ifdef DEBUG
const char* getNonStandardArgKindName(NonStandardArgKind kind);
#endif
enum class CFGCallKind
{
ValidateAndCall,
Dispatch,
};
struct GenTreeCall final : public GenTree
{
class Use
{
GenTree* m_node;
Use* m_next;
public:
Use(GenTree* node, Use* next = nullptr) : m_node(node), m_next(next)
{
assert(node != nullptr);
}
GenTree*& NodeRef()
{
return m_node;
}
GenTree* GetNode() const
{
assert(m_node != nullptr);
return m_node;
}
void SetNode(GenTree* node)
{
assert(node != nullptr);
m_node = node;
}
Use*& NextRef()
{
return m_next;
}
Use* GetNext() const
{
return m_next;
}
void SetNext(Use* next)
{
m_next = next;
}
};
class UseIterator
{
Use* m_use;
public:
UseIterator(Use* use) : m_use(use)
{
}
Use& operator*() const
{
return *m_use;
}
Use* operator->() const
{
return m_use;
}
Use* GetUse() const
{
return m_use;
}
UseIterator& operator++()
{
m_use = m_use->GetNext();
return *this;
}
bool operator==(const UseIterator& i) const
{
return m_use == i.m_use;
}
bool operator!=(const UseIterator& i) const
{
return m_use != i.m_use;
}
};
class UseList
{
Use* m_uses;
public:
UseList(Use* uses) : m_uses(uses)
{
}
UseIterator begin() const
{
return UseIterator(m_uses);
}
UseIterator end() const
{
return UseIterator(nullptr);
}
};
Use* gtCallThisArg; // The instance argument ('this' pointer)
Use* gtCallArgs; // The list of arguments in original evaluation order
Use* gtCallLateArgs; // On x86: The register arguments in an optimal order
// On ARM/x64: - also includes any outgoing arg space arguments
// - that were evaluated into a temp LclVar
fgArgInfo* fgArgInfo;
UseList Args()
{
return UseList(gtCallArgs);
}
UseList LateArgs()
{
return UseList(gtCallLateArgs);
}
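// An illustrative sketch (hypothetical 'call' node) of walking the argument lists with the
// Use/UseIterator machinery above:
//
//   for (GenTreeCall::Use& use : call->Args())
//   {
//       GenTree* argNode = use.GetNode(); // arguments in original evaluation order
//   }
//   for (GenTreeCall::Use& use : call->LateArgs())
//   {
//       GenTree* argNode = use.GetNode(); // arguments moved to the late list (registers / temps)
//   }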
#ifdef DEBUG
// Used to register callsites with the EE
CORINFO_SIG_INFO* callSig;
#endif
union {
TailCallSiteInfo* tailCallInfo;
// Only used for unmanaged calls, which cannot be tail-called
CorInfoCallConvExtension unmgdCallConv;
};
#if FEATURE_MULTIREG_RET
// State required to support multi-reg returning call nodes.
//
// TODO-AllArch: enable for all call nodes to unify single-reg and multi-reg returns.
ReturnTypeDesc gtReturnTypeDesc;
// GetRegNum() would always be the first return reg.
// The following array holds the other reg numbers of multi-reg return.
regNumberSmall gtOtherRegs[MAX_RET_REG_COUNT - 1];
MultiRegSpillFlags gtSpillFlags;
#endif // FEATURE_MULTIREG_RET
//-----------------------------------------------------------------------
// GetReturnTypeDesc: get the type descriptor of return value of the call
//
// Arguments:
// None
//
// Returns
// Type descriptor of the value returned by call
//
// TODO-AllArch: enable for all call nodes to unify single-reg and multi-reg returns.
const ReturnTypeDesc* GetReturnTypeDesc() const
{
#if FEATURE_MULTIREG_RET
return &gtReturnTypeDesc;
#else
return nullptr;
#endif
}
void InitializeLongReturnType()
{
#if FEATURE_MULTIREG_RET
gtReturnTypeDesc.InitializeLongReturnType();
#endif
}
void InitializeStructReturnType(Compiler* comp, CORINFO_CLASS_HANDLE retClsHnd, CorInfoCallConvExtension callConv)
{
#if FEATURE_MULTIREG_RET
gtReturnTypeDesc.InitializeStructReturnType(comp, retClsHnd, callConv);
#endif
}
void ResetReturnType()
{
#if FEATURE_MULTIREG_RET
gtReturnTypeDesc.Reset();
#endif
}
//---------------------------------------------------------------------------
// GetRegNumByIdx: get i'th return register allocated to this call node.
//
// Arguments:
// idx - index of the return register
//
// Return Value:
// Return regNumber of i'th return register of call node.
// Returns REG_NA if there is no valid return register for the given index.
//
regNumber GetRegNumByIdx(unsigned idx) const
{
assert(idx < MAX_RET_REG_COUNT);
if (idx == 0)
{
return GetRegNum();
}
#if FEATURE_MULTIREG_RET
return (regNumber)gtOtherRegs[idx - 1];
#else
return REG_NA;
#endif
}
//----------------------------------------------------------------------
// SetRegNumByIdx: set i'th return register of this call node
//
// Arguments:
// reg - reg number
// idx - index of the return register
//
// Return Value:
// None
//
void SetRegNumByIdx(regNumber reg, unsigned idx)
{
assert(idx < MAX_RET_REG_COUNT);
if (idx == 0)
{
SetRegNum(reg);
}
#if FEATURE_MULTIREG_RET
else
{
gtOtherRegs[idx - 1] = (regNumberSmall)reg;
assert(gtOtherRegs[idx - 1] == reg);
}
#else
unreached();
#endif
}
//----------------------------------------------------------------------------
// ClearOtherRegs: clear multi-reg state to indicate no regs are allocated
//
// Arguments:
// None
//
// Return Value:
// None
//
void ClearOtherRegs()
{
#if FEATURE_MULTIREG_RET
for (unsigned i = 0; i < MAX_RET_REG_COUNT - 1; ++i)
{
gtOtherRegs[i] = REG_NA;
}
#endif
}
//----------------------------------------------------------------------------
// CopyOtherRegs: copy multi-reg state from the given call node to this node
//
// Arguments:
// fromCall - GenTreeCall node from which to copy multi-reg state
//
// Return Value:
// None
//
void CopyOtherRegs(GenTreeCall* fromCall)
{
#if FEATURE_MULTIREG_RET
for (unsigned i = 0; i < MAX_RET_REG_COUNT - 1; ++i)
{
this->gtOtherRegs[i] = fromCall->gtOtherRegs[i];
}
#endif
}
// Get reg mask of all the valid registers of gtOtherRegs array
regMaskTP GetOtherRegMask() const;
GenTreeFlags GetRegSpillFlagByIdx(unsigned idx) const
{
#if FEATURE_MULTIREG_RET
return GetMultiRegSpillFlagsByIdx(gtSpillFlags, idx);
#else
assert(!"unreached");
return GTF_EMPTY;
#endif
}
void SetRegSpillFlagByIdx(GenTreeFlags flags, unsigned idx)
{
#if FEATURE_MULTIREG_RET
gtSpillFlags = SetMultiRegSpillFlagsByIdx(gtSpillFlags, flags, idx);
#endif
}
//-------------------------------------------------------------------
// ClearOtherRegFlags: clear GTF_* flags associated with gtOtherRegs
//
// Arguments:
// None
//
// Return Value:
// None
void ClearOtherRegFlags()
{
#if FEATURE_MULTIREG_RET
gtSpillFlags = 0;
#endif
}
//-------------------------------------------------------------------------
// CopyOtherRegFlags: copy GTF_* flags associated with gtOtherRegs from
// the given call node.
//
// Arguments:
// fromCall - GenTreeCall node from which to copy
//
// Return Value:
// None
//
void CopyOtherRegFlags(GenTreeCall* fromCall)
{
#if FEATURE_MULTIREG_RET
this->gtSpillFlags = fromCall->gtSpillFlags;
#endif
}
bool IsUnmanaged() const
{
return (gtFlags & GTF_CALL_UNMANAGED) != 0;
}
bool NeedsNullCheck() const
{
return (gtFlags & GTF_CALL_NULLCHECK) != 0;
}
bool CallerPop() const
{
return (gtFlags & GTF_CALL_POP_ARGS) != 0;
}
bool IsVirtual() const
{
return (gtFlags & GTF_CALL_VIRT_KIND_MASK) != GTF_CALL_NONVIRT;
}
bool IsVirtualStub() const
{
return (gtFlags & GTF_CALL_VIRT_KIND_MASK) == GTF_CALL_VIRT_STUB;
}
bool IsVirtualVtable() const
{
return (gtFlags & GTF_CALL_VIRT_KIND_MASK) == GTF_CALL_VIRT_VTABLE;
}
bool IsInlineCandidate() const
{
return (gtFlags & GTF_CALL_INLINE_CANDIDATE) != 0;
}
bool IsR2ROrVirtualStubRelativeIndir()
{
#if defined(FEATURE_READYTORUN)
if (IsR2RRelativeIndir())
{
return true;
}
#endif
return IsVirtualStubRelativeIndir();
}
bool HasNonStandardAddedArgs(Compiler* compiler) const;
int GetNonStandardAddedArgCount(Compiler* compiler) const;
// Returns true if this call uses a retBuf argument as part of its calling convention
bool HasRetBufArg() const
{
return (gtCallMoreFlags & GTF_CALL_M_RETBUFFARG) != 0;
}
//-------------------------------------------------------------------------
// TreatAsHasRetBufArg:
//
// Arguments:
//     compiler - the compiler instance, so that we can call eeGetHelperNum
//
// Return Value:
//     Returns true if we treat the call as if it has a retBuf argument.
//     The call may actually have a retBuf argument, or it could be a JIT helper
//     that we are still transforming during the importer phase.
//
// Notes:
// On ARM64 marking the method with the GTF_CALL_M_RETBUFFARG flag
// will make HasRetBufArg() return true, but will also force the
// use of register x8 to pass the RetBuf argument.
//
bool TreatAsHasRetBufArg(Compiler* compiler) const;
bool HasFixedRetBufArg() const
{
if (!(hasFixedRetBuffReg() && HasRetBufArg()))
{
return false;
}
#if !defined(TARGET_ARM)
return !TargetOS::IsWindows || !callConvIsInstanceMethodCallConv(GetUnmanagedCallConv());
#else
return true;
#endif
}
//-----------------------------------------------------------------------------------------
// HasMultiRegRetVal: whether the call node returns its value in multiple return registers.
//
// Arguments:
// None
//
// Return Value:
// True if the call is returning a multi-reg return value. False otherwise.
//
bool HasMultiRegRetVal() const
{
#ifdef FEATURE_MULTIREG_RET
#if defined(TARGET_X86) || defined(TARGET_ARM)
if (varTypeIsLong(gtType))
{
return true;
}
#endif
if (!varTypeIsStruct(gtType) || HasRetBufArg())
{
return false;
}
// Now it is a struct that is returned in registers.
return GetReturnTypeDesc()->IsMultiRegRetType();
#else // !FEATURE_MULTIREG_RET
return false;
#endif // !FEATURE_MULTIREG_RET
}
// Returns true if VM has flagged this method as CORINFO_FLG_PINVOKE.
bool IsPInvoke() const
{
return (gtCallMoreFlags & GTF_CALL_M_PINVOKE) != 0;
}
// Note that the distinction between a "tail" prefixed call and an implicit tail call
// is maintained on a call node until fgMorphCall(), after which it will be
// either a tail call (i.e. IsTailCall() is true) or a non-tail call.
bool IsTailPrefixedCall() const
{
return (gtCallMoreFlags & GTF_CALL_M_EXPLICIT_TAILCALL) != 0;
}
// Returns true if this call didn't have an explicit tail. prefix in the IL
// but was marked as an explicit tail call because of tail call stress mode.
bool IsStressTailCall() const
{
return (gtCallMoreFlags & GTF_CALL_M_STRESS_TAILCALL) != 0;
}
// This method returning "true" implies that tail call flowgraph morphing has
// performed final checks and committed to making a tail call.
bool IsTailCall() const
{
return (gtCallMoreFlags & GTF_CALL_M_TAILCALL) != 0;
}
// This method returning "true" implies that the importer has performed tail call checks
// and is providing a hint that this can be converted to a tail call.
bool CanTailCall() const
{
return IsTailPrefixedCall() || IsImplicitTailCall();
}
// Check whether this is a tailcall dispatched via JIT helper. We only use
// this mechanism on x86 as it is faster than our other more general
// tailcall mechanism.
bool IsTailCallViaJitHelper() const
{
#ifdef TARGET_X86
return IsTailCall() && (gtCallMoreFlags & GTF_CALL_M_TAILCALL_VIA_JIT_HELPER);
#else
return false;
#endif
}
#if FEATURE_FASTTAILCALL
bool IsFastTailCall() const
{
#ifdef TARGET_X86
return IsTailCall() && !(gtCallMoreFlags & GTF_CALL_M_TAILCALL_VIA_JIT_HELPER);
#else
return IsTailCall();
#endif
}
#else // !FEATURE_FASTTAILCALL
bool IsFastTailCall() const
{
return false;
}
#endif // !FEATURE_FASTTAILCALL
#if FEATURE_TAILCALL_OPT
// Returns true if this is marked for opportunistic tail calling.
// That is, can be tail called though not explicitly prefixed with "tail" prefix.
bool IsImplicitTailCall() const
{
return (gtCallMoreFlags & GTF_CALL_M_IMPLICIT_TAILCALL) != 0;
}
bool IsTailCallConvertibleToLoop() const
{
return (gtCallMoreFlags & GTF_CALL_M_TAILCALL_TO_LOOP) != 0;
}
#else // !FEATURE_TAILCALL_OPT
bool IsImplicitTailCall() const
{
return false;
}
bool IsTailCallConvertibleToLoop() const
{
return false;
}
#endif // !FEATURE_TAILCALL_OPT
bool NormalizesSmallTypesOnReturn()
{
return GetUnmanagedCallConv() == CorInfoCallConvExtension::Managed;
}
bool IsSameThis() const
{
return (gtCallMoreFlags & GTF_CALL_M_NONVIRT_SAME_THIS) != 0;
}
bool IsDelegateInvoke() const
{
return (gtCallMoreFlags & GTF_CALL_M_DELEGATE_INV) != 0;
}
bool IsVirtualStubRelativeIndir() const
{
return IsVirtualStub() && (gtCallMoreFlags & GTF_CALL_M_VIRTSTUB_REL_INDIRECT) != 0;
}
bool IsR2RRelativeIndir() const
{
#ifdef FEATURE_READYTORUN
return (gtCallMoreFlags & GTF_CALL_M_R2R_REL_INDIRECT) != 0;
#else
return false;
#endif
}
#ifdef FEATURE_READYTORUN
void setEntryPoint(const CORINFO_CONST_LOOKUP& entryPoint)
{
gtEntryPoint = entryPoint;
if (gtEntryPoint.accessType == IAT_PVALUE)
{
gtCallMoreFlags |= GTF_CALL_M_R2R_REL_INDIRECT;
}
}
#endif // FEATURE_READYTORUN
bool IsVarargs() const
{
return (gtCallMoreFlags & GTF_CALL_M_VARARGS) != 0;
}
bool IsNoReturn() const
{
return (gtCallMoreFlags & GTF_CALL_M_DOES_NOT_RETURN) != 0;
}
bool IsFatPointerCandidate() const
{
return (gtCallMoreFlags & GTF_CALL_M_FAT_POINTER_CHECK) != 0;
}
bool IsGuardedDevirtualizationCandidate() const
{
return (gtCallMoreFlags & GTF_CALL_M_GUARDED_DEVIRT) != 0;
}
bool IsPure(Compiler* compiler) const;
bool HasSideEffects(Compiler* compiler, bool ignoreExceptions = false, bool ignoreCctors = false) const;
void ClearFatPointerCandidate()
{
gtCallMoreFlags &= ~GTF_CALL_M_FAT_POINTER_CHECK;
}
void SetFatPointerCandidate()
{
gtCallMoreFlags |= GTF_CALL_M_FAT_POINTER_CHECK;
}
bool IsDevirtualized() const
{
return (gtCallMoreFlags & GTF_CALL_M_DEVIRTUALIZED) != 0;
}
bool IsGuarded() const
{
return (gtCallMoreFlags & GTF_CALL_M_GUARDED) != 0;
}
bool IsUnboxed() const
{
return (gtCallMoreFlags & GTF_CALL_M_UNBOXED) != 0;
}
bool IsSuppressGCTransition() const
{
return (gtCallMoreFlags & GTF_CALL_M_SUPPRESS_GC_TRANSITION) != 0;
}
void ClearGuardedDevirtualizationCandidate()
{
gtCallMoreFlags &= ~GTF_CALL_M_GUARDED_DEVIRT;
}
void SetGuardedDevirtualizationCandidate()
{
gtCallMoreFlags |= GTF_CALL_M_GUARDED_DEVIRT;
}
void SetIsGuarded()
{
gtCallMoreFlags |= GTF_CALL_M_GUARDED;
}
void SetExpRuntimeLookup()
{
gtCallMoreFlags |= GTF_CALL_M_EXP_RUNTIME_LOOKUP;
}
void ClearExpRuntimeLookup()
{
gtCallMoreFlags &= ~GTF_CALL_M_EXP_RUNTIME_LOOKUP;
}
bool IsExpRuntimeLookup() const
{
return (gtCallMoreFlags & GTF_CALL_M_EXP_RUNTIME_LOOKUP) != 0;
}
void SetExpandedEarly()
{
gtCallMoreFlags |= GTF_CALL_M_EXPANDED_EARLY;
}
void ClearExpandedEarly()
{
gtCallMoreFlags &= ~GTF_CALL_M_EXPANDED_EARLY;
}
bool IsExpandedEarly() const
{
return (gtCallMoreFlags & GTF_CALL_M_EXPANDED_EARLY) != 0;
}
//-----------------------------------------------------------------------------------------
// GetIndirectionCellArgKind: Get the kind of indirection cell used by this call.
//
// Arguments:
// None
//
// Return Value:
// The kind (either R2RIndirectionCell or VirtualStubCell),
// or NonStandardArgKind::None if this call does not have an indirection cell.
//
NonStandardArgKind GetIndirectionCellArgKind() const
{
if (IsVirtualStub())
{
return NonStandardArgKind::VirtualStubCell;
}
#if defined(TARGET_ARMARCH)
// For ARM architectures, we always use an indirection cell for R2R calls.
if (IsR2RRelativeIndir())
{
return NonStandardArgKind::R2RIndirectionCell;
}
#elif defined(TARGET_XARCH)
// On XARCH the cell can be recovered by disassembling the call site, except for tail calls, which need the indirection cell.
if (IsR2RRelativeIndir() && IsFastTailCall())
{
return NonStandardArgKind::R2RIndirectionCell;
}
#endif
return NonStandardArgKind::None;
}
CFGCallKind GetCFGCallKind()
{
#if defined(TARGET_AMD64)
// On x64 the dispatcher is more performant, but we cannot use it when
// we need to pass indirection cells as those go into registers that
// are clobbered by the dispatch helper.
bool mayUseDispatcher = GetIndirectionCellArgKind() == NonStandardArgKind::None;
bool shouldUseDispatcher = true;
#elif defined(TARGET_ARM64)
bool mayUseDispatcher = true;
// Branch predictors on ARM64 generally do not handle the dispatcher as
// well as x64 hardware does, so use only the validator by default.
bool shouldUseDispatcher = false;
#else
// Other platforms do not even support the dispatcher.
bool mayUseDispatcher = false;
bool shouldUseDispatcher = false;
#endif
#ifdef DEBUG
switch (JitConfig.JitCFGUseDispatcher())
{
case 0:
shouldUseDispatcher = false;
break;
case 1:
shouldUseDispatcher = true;
break;
default:
break;
}
#endif
return mayUseDispatcher && shouldUseDispatcher ? CFGCallKind::Dispatch : CFGCallKind::ValidateAndCall;
}
void ResetArgInfo();
GenTreeCallFlags gtCallMoreFlags; // in addition to gtFlags
gtCallTypes gtCallType : 3; // value from the gtCallTypes enumeration
var_types gtReturnType : 5; // exact return type
CORINFO_CLASS_HANDLE gtRetClsHnd; // The return type handle of the call if it is a struct; always available
void* gtStubCallStubAddr; // GTF_CALL_VIRT_STUB - these are never inlined
union {
// only used for CALLI unmanaged calls (CT_INDIRECT)
GenTree* gtCallCookie;
// gtInlineCandidateInfo is only used when inlining methods
InlineCandidateInfo* gtInlineCandidateInfo;
GuardedDevirtualizationCandidateInfo* gtGuardedDevirtualizationCandidateInfo;
ClassProfileCandidateInfo* gtClassProfileCandidateInfo;
LateDevirtualizationInfo* gtLateDevirtualizationInfo;
CORINFO_GENERIC_HANDLE compileTimeHelperArgumentHandle; // Used to track type handle argument of dynamic helpers
void* gtDirectCallAddress; // Used to pass direct call address between lower and codegen
};
// expression evaluated after args are placed which determines the control target
GenTree* gtControlExpr;
union {
CORINFO_METHOD_HANDLE gtCallMethHnd; // CT_USER_FUNC or CT_HELPER
GenTree* gtCallAddr; // CT_INDIRECT
};
#ifdef FEATURE_READYTORUN
// Call target lookup info for method call from a Ready To Run module
CORINFO_CONST_LOOKUP gtEntryPoint;
#endif
#if defined(DEBUG) || defined(INLINE_DATA)
// For non-inline candidates, track the first observation
// that blocks candidacy.
InlineObservation gtInlineObservation;
// IL offset of the call wrt its parent method.
IL_OFFSET gtRawILOffset;
// In DEBUG we report even non inline candidates in the inline tree in
// fgNoteNonInlineCandidate. We need to keep around the inline context for
// this as normally it's part of the candidate info.
class InlineContext* gtInlineContext;
#endif // defined(DEBUG) || defined(INLINE_DATA)
bool IsHelperCall() const
{
return gtCallType == CT_HELPER;
}
bool IsHelperCall(CORINFO_METHOD_HANDLE callMethHnd) const
{
return IsHelperCall() && (callMethHnd == gtCallMethHnd);
}
bool IsHelperCall(Compiler* compiler, unsigned helper) const;
void ReplaceCallOperand(GenTree** operandUseEdge, GenTree* replacement);
bool AreArgsComplete() const;
CorInfoCallConvExtension GetUnmanagedCallConv() const
{
return IsUnmanaged() ? unmgdCallConv : CorInfoCallConvExtension::Managed;
}
static bool Equals(GenTreeCall* c1, GenTreeCall* c2);
GenTreeCall(var_types type) : GenTree(GT_CALL, type)
{
fgArgInfo = nullptr;
}
#if DEBUGGABLE_GENTREE
GenTreeCall() : GenTree()
{
}
#endif
};
struct GenTreeCmpXchg : public GenTree
{
GenTree* gtOpLocation;
GenTree* gtOpValue;
GenTree* gtOpComparand;
GenTreeCmpXchg(var_types type, GenTree* loc, GenTree* val, GenTree* comparand)
: GenTree(GT_CMPXCHG, type), gtOpLocation(loc), gtOpValue(val), gtOpComparand(comparand)
{
// There's no reason to do a compare-exchange on a local location, so we'll assume that all of these
// have global effects.
gtFlags |= (GTF_GLOB_REF | GTF_ASG);
// Merge in flags from operands
gtFlags |= gtOpLocation->gtFlags & GTF_ALL_EFFECT;
gtFlags |= gtOpValue->gtFlags & GTF_ALL_EFFECT;
gtFlags |= gtOpComparand->gtFlags & GTF_ALL_EFFECT;
}
#if DEBUGGABLE_GENTREE
GenTreeCmpXchg() : GenTree()
{
}
#endif
};
#if !defined(TARGET_64BIT)
struct GenTreeMultiRegOp : public GenTreeOp
{
regNumber gtOtherReg;
// GTF_SPILL or GTF_SPILLED flag on a multi-reg node indicates that one or
// more of its result regs are in that state. The spill flag of each of the
// return register is stored here. We only need 2 bits per returned register,
// so this is treated as a 2-bit array. No architecture needs more than 8 bits.
MultiRegSpillFlags gtSpillFlags;
GenTreeMultiRegOp(genTreeOps oper, var_types type, GenTree* op1, GenTree* op2)
: GenTreeOp(oper, type, op1, op2), gtOtherReg(REG_NA)
{
ClearOtherRegFlags();
}
unsigned GetRegCount() const
{
return (TypeGet() == TYP_LONG) ? 2 : 1;
}
//---------------------------------------------------------------------------
// GetRegNumByIdx: get i'th register allocated to this struct argument.
//
// Arguments:
// idx - index of the register
//
// Return Value:
// Return regNumber of i'th register of this register argument
//
regNumber GetRegNumByIdx(unsigned idx) const
{
assert(idx < 2);
if (idx == 0)
{
return GetRegNum();
}
return gtOtherReg;
}
GenTreeFlags GetRegSpillFlagByIdx(unsigned idx) const
{
return GetMultiRegSpillFlagsByIdx(gtSpillFlags, idx);
}
void SetRegSpillFlagByIdx(GenTreeFlags flags, unsigned idx)
{
#if FEATURE_MULTIREG_RET
gtSpillFlags = SetMultiRegSpillFlagsByIdx(gtSpillFlags, flags, idx);
#endif
}
//--------------------------------------------------------------------------
// GetRegType: Get var_type of the register specified by index.
//
// Arguments:
// index - Index of the register.
// First register will have an index 0 and so on.
//
// Return Value:
// var_type of the register specified by its index.
//
var_types GetRegType(unsigned index) const
{
assert(index < 2);
// The register type is usually the same as the GenTree type, since GenTreeMultiRegOp usually defines a single
// reg.
// The special case is TYP_LONG, which may be a MUL_LONG or a DOUBLE arg passed as a LONG,
// in which case each index is returned as TYP_INT.
var_types result = TypeGet();
if (result == TYP_LONG)
{
result = TYP_INT;
}
return result;
}
//-------------------------------------------------------------------
// ClearOtherRegFlags: clear GTF_* flags associated with gtOtherRegs
//
// Arguments:
// None
//
// Return Value:
// None
//
void ClearOtherRegFlags()
{
gtSpillFlags = 0;
}
#if DEBUGGABLE_GENTREE
GenTreeMultiRegOp() : GenTreeOp()
{
}
#endif
};
#endif // !defined(TARGET_64BIT)
struct GenTreeFptrVal : public GenTree
{
CORINFO_METHOD_HANDLE gtFptrMethod;
bool gtFptrDelegateTarget;
#ifdef FEATURE_READYTORUN
CORINFO_CONST_LOOKUP gtEntryPoint;
#endif
GenTreeFptrVal(var_types type, CORINFO_METHOD_HANDLE meth)
: GenTree(GT_FTN_ADDR, type), gtFptrMethod(meth), gtFptrDelegateTarget(false)
{
#ifdef FEATURE_READYTORUN
gtEntryPoint.addr = nullptr;
gtEntryPoint.accessType = IAT_VALUE;
#endif
}
#if DEBUGGABLE_GENTREE
GenTreeFptrVal() : GenTree()
{
}
#endif
};
/* gtQmark */
struct GenTreeQmark : public GenTreeOp
{
GenTreeQmark(var_types type, GenTree* cond, GenTreeColon* colon) : GenTreeOp(GT_QMARK, type, cond, colon)
{
// These must follow a specific form.
assert((cond != nullptr) && cond->TypeIs(TYP_INT));
assert((colon != nullptr) && colon->OperIs(GT_COLON));
}
#if DEBUGGABLE_GENTREE
GenTreeQmark() : GenTreeOp()
{
}
#endif
};
/* gtIntrinsic -- intrinsic (possibly-binary op [NULL op2 is allowed] with an additional field) */
struct GenTreeIntrinsic : public GenTreeOp
{
NamedIntrinsic gtIntrinsicName;
CORINFO_METHOD_HANDLE gtMethodHandle; // Method handle of the method which is treated as an intrinsic.
#ifdef FEATURE_READYTORUN
// Call target lookup info for method call from a Ready To Run module
CORINFO_CONST_LOOKUP gtEntryPoint;
#endif
GenTreeIntrinsic(var_types type, GenTree* op1, NamedIntrinsic intrinsicName, CORINFO_METHOD_HANDLE methodHandle)
: GenTreeOp(GT_INTRINSIC, type, op1, nullptr), gtIntrinsicName(intrinsicName), gtMethodHandle(methodHandle)
{
assert(intrinsicName != NI_Illegal);
}
GenTreeIntrinsic(
var_types type, GenTree* op1, GenTree* op2, NamedIntrinsic intrinsicName, CORINFO_METHOD_HANDLE methodHandle)
: GenTreeOp(GT_INTRINSIC, type, op1, op2), gtIntrinsicName(intrinsicName), gtMethodHandle(methodHandle)
{
assert(intrinsicName != NI_Illegal);
}
#if DEBUGGABLE_GENTREE
GenTreeIntrinsic() : GenTreeOp()
{
}
#endif
};
// GenTreeMultiOp - a node with a flexible count of operands stored in an array.
// The array can be an inline one, or a dynamic one, or both, with switching
// between them supported. See GenTreeJitIntrinsic for an example of a node
// utilizing GenTreeMultiOp. GTF_REVERSE_OPS is supported for GenTreeMultiOp's
// with two operands.
//
struct GenTreeMultiOp : public GenTree
{
public:
class Iterator
{
protected:
GenTree** m_use;
Iterator(GenTree** use) : m_use(use)
{
}
public:
Iterator& operator++()
{
m_use++;
return *this;
}
bool operator==(const Iterator& other) const
{
return m_use == other.m_use;
}
bool operator!=(const Iterator& other) const
{
return m_use != other.m_use;
}
};
class OperandsIterator final : public Iterator
{
public:
OperandsIterator(GenTree** use) : Iterator(use)
{
}
GenTree* operator*()
{
return *m_use;
}
};
class UseEdgesIterator final : public Iterator
{
public:
UseEdgesIterator(GenTree** use) : Iterator(use)
{
}
GenTree** operator*()
{
return m_use;
}
};
private:
GenTree** m_operands;
protected:
template <unsigned InlineOperandCount, typename... Operands>
GenTreeMultiOp(genTreeOps oper,
var_types type,
CompAllocator allocator,
GenTree* (&inlineOperands)[InlineOperandCount] DEBUGARG(bool largeNode),
Operands... operands)
: GenTree(oper, type DEBUGARG(largeNode))
{
const size_t OperandCount = sizeof...(Operands);
m_operands = (OperandCount <= InlineOperandCount) ? inlineOperands : allocator.allocate<GenTree*>(OperandCount);
// "OperandCount + 1" so that it works well when OperandCount is 0.
GenTree* operandsArray[OperandCount + 1]{operands...};
InitializeOperands(operandsArray, OperandCount);
}
// Note that this constructor takes ownership of the "operands" array.
template <unsigned InlineOperandCount>
GenTreeMultiOp(genTreeOps oper,
var_types type,
GenTree** operands,
size_t operandCount,
GenTree* (&inlineOperands)[InlineOperandCount] DEBUGARG(bool largeNode))
: GenTree(oper, type DEBUGARG(largeNode))
{
m_operands = (operandCount <= InlineOperandCount) ? inlineOperands : operands;
InitializeOperands(operands, operandCount);
}
public:
#if DEBUGGABLE_GENTREE
GenTreeMultiOp() : GenTree()
{
}
#endif
GenTree*& Op(size_t index)
{
size_t actualIndex = index - 1;
assert(actualIndex < m_operandCount);
assert(m_operands[actualIndex] != nullptr);
return m_operands[actualIndex];
}
GenTree* Op(size_t index) const
{
return const_cast<GenTreeMultiOp*>(this)->Op(index);
}
// Note that unlike the general "Operands" iterator, this specialized version does not respect GTF_REVERSE_OPS.
IteratorPair<OperandsIterator> Operands()
{
return MakeIteratorPair(OperandsIterator(GetOperandArray()),
OperandsIterator(GetOperandArray() + GetOperandCount()));
}
// Note that unlike the general "UseEdges" iterator, this specialized version does not respect GTF_REVERSE_OPS.
IteratorPair<UseEdgesIterator> UseEdges()
{
return MakeIteratorPair(UseEdgesIterator(GetOperandArray()),
UseEdgesIterator(GetOperandArray() + GetOperandCount()));
}
size_t GetOperandCount() const
{
return m_operandCount;
}
GenTree** GetOperandArray(size_t startIndex = 0) const
{
return m_operands + startIndex;
}
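// An illustrative sketch (hypothetical 'multiOp' node): Op() is 1-based, while the iterators
// above walk the operands in their stored order (GTF_REVERSE_OPS is not taken into account):
//
//   GenTree* firstOperand = multiOp->Op(1);
//   for (GenTree* operand : multiOp->Operands())
//   {
//       // ... visit each operand exactly once
//   }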
protected:
// Reconfigures the operand array, leaving it in a "dirty" state.
void ResetOperandArray(size_t newOperandCount,
Compiler* compiler,
GenTree** inlineOperands,
size_t inlineOperandCount);
static bool OperandsAreEqual(GenTreeMultiOp* op1, GenTreeMultiOp* op2);
private:
void InitializeOperands(GenTree** operands, size_t operandCount);
void SetOperandCount(size_t newOperandCount)
{
assert(FitsIn<uint8_t>(newOperandCount));
m_operandCount = static_cast<uint8_t>(newOperandCount);
}
};
// Helper class used to implement the constructor of GenTreeJitIntrinsic which
// transfers the ownership of the passed-in array to the underlying MultiOp node.
class IntrinsicNodeBuilder final
{
friend struct GenTreeJitIntrinsic;
GenTree** m_operands;
size_t m_operandCount;
GenTree* m_inlineOperands[2];
public:
IntrinsicNodeBuilder(CompAllocator allocator, size_t operandCount) : m_operandCount(operandCount)
{
m_operands =
(operandCount <= ArrLen(m_inlineOperands)) ? m_inlineOperands : allocator.allocate<GenTree*>(operandCount);
#ifdef DEBUG
for (size_t i = 0; i < operandCount; i++)
{
m_operands[i] = nullptr;
}
#endif // DEBUG
}
IntrinsicNodeBuilder(CompAllocator allocator, GenTreeMultiOp* source) : m_operandCount(source->GetOperandCount())
{
m_operands = (m_operandCount <= ArrLen(m_inlineOperands)) ? m_inlineOperands
: allocator.allocate<GenTree*>(m_operandCount);
for (size_t i = 0; i < m_operandCount; i++)
{
m_operands[i] = source->Op(i + 1);
}
}
void AddOperand(size_t index, GenTree* operand)
{
assert(index < m_operandCount);
assert(m_operands[index] == nullptr);
m_operands[index] = operand;
}
GenTree* GetOperand(size_t index) const
{
assert(index < m_operandCount);
assert(m_operands[index] != nullptr);
return m_operands[index];
}
size_t GetOperandCount() const
{
return m_operandCount;
}
private:
GenTree** GetBuiltOperands()
{
#ifdef DEBUG
for (size_t i = 0; i < m_operandCount; i++)
{
assert(m_operands[i] != nullptr);
}
#endif // DEBUG
return m_operands;
}
};
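// An illustrative sketch (hypothetical 'allocator'/'operands' variables) of building an operand
// set whose size is only known at run time and handing it off to a node constructor:
//
//   IntrinsicNodeBuilder builder(allocator, operandCount); // 'allocator' is a CompAllocator
//   for (size_t i = 0; i < operandCount; i++)
//   {
//       builder.AddOperand(i, operands[i]); // every slot must be filled exactly once
//   }
//   // The finished builder is then moved into a node constructor, e.g. GenTreeHWIntrinsic below.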
struct GenTreeJitIntrinsic : public GenTreeMultiOp
{
protected:
GenTree* gtInlineOperands[2];
uint16_t gtLayoutNum;
unsigned char gtAuxiliaryJitType; // For intrinsics that need another type (e.g. Avx2.Gather* or SIMD (by element))
regNumberSmall gtOtherReg; // For intrinsics that return 2 registers
unsigned char gtSimdBaseJitType; // SIMD vector base JIT type
unsigned char gtSimdSize; // SIMD vector size in bytes, use 0 for scalar intrinsics
#if defined(FEATURE_SIMD)
union {
SIMDIntrinsicID gtSIMDIntrinsicID; // operation Id
NamedIntrinsic gtHWIntrinsicId;
};
#else
NamedIntrinsic gtHWIntrinsicId;
#endif
public:
unsigned GetLayoutNum() const
{
return gtLayoutNum;
}
void SetLayoutNum(unsigned layoutNum)
{
assert(FitsIn<uint16_t>(layoutNum));
gtLayoutNum = static_cast<uint16_t>(layoutNum);
}
regNumber GetOtherReg() const
{
return (regNumber)gtOtherReg;
}
void SetOtherReg(regNumber reg)
{
gtOtherReg = (regNumberSmall)reg;
assert(gtOtherReg == reg);
}
CorInfoType GetAuxiliaryJitType() const
{
return (CorInfoType)gtAuxiliaryJitType;
}
void SetAuxiliaryJitType(CorInfoType auxiliaryJitType)
{
gtAuxiliaryJitType = (unsigned char)auxiliaryJitType;
assert(gtAuxiliaryJitType == auxiliaryJitType);
}
var_types GetAuxiliaryType() const;
CorInfoType GetSimdBaseJitType() const
{
return (CorInfoType)gtSimdBaseJitType;
}
CorInfoType GetNormalizedSimdBaseJitType() const
{
CorInfoType simdBaseJitType = GetSimdBaseJitType();
switch (simdBaseJitType)
{
case CORINFO_TYPE_NATIVEINT:
{
#ifdef TARGET_64BIT
return CORINFO_TYPE_LONG;
#else
return CORINFO_TYPE_INT;
#endif
}
case CORINFO_TYPE_NATIVEUINT:
{
#ifdef TARGET_64BIT
return CORINFO_TYPE_ULONG;
#else
return CORINFO_TYPE_UINT;
#endif
}
default:
return simdBaseJitType;
}
}
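// An illustrative sketch (hypothetical 'node'): native-sized base types are normalized to their
// fixed-size equivalents, everything else is returned unchanged:
//
//   node->SetSimdBaseJitType(CORINFO_TYPE_NATIVEINT);
//   node->GetNormalizedSimdBaseJitType(); // CORINFO_TYPE_LONG on 64-bit targets, CORINFO_TYPE_INT on 32-bit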
void SetSimdBaseJitType(CorInfoType simdBaseJitType)
{
gtSimdBaseJitType = (unsigned char)simdBaseJitType;
assert(gtSimdBaseJitType == simdBaseJitType);
}
var_types GetSimdBaseType() const;
unsigned char GetSimdSize() const
{
return gtSimdSize;
}
void SetSimdSize(unsigned simdSize)
{
gtSimdSize = (unsigned char)simdSize;
assert(gtSimdSize == simdSize);
}
template <typename... Operands>
GenTreeJitIntrinsic(genTreeOps oper,
var_types type,
CompAllocator allocator,
CorInfoType simdBaseJitType,
unsigned simdSize,
Operands... operands)
: GenTreeMultiOp(oper, type, allocator, gtInlineOperands DEBUGARG(false), operands...)
, gtLayoutNum(0)
, gtAuxiliaryJitType(CORINFO_TYPE_UNDEF)
, gtOtherReg(REG_NA)
, gtSimdBaseJitType((unsigned char)simdBaseJitType)
, gtSimdSize((unsigned char)simdSize)
, gtHWIntrinsicId(NI_Illegal)
{
assert(gtSimdBaseJitType == simdBaseJitType);
assert(gtSimdSize == simdSize);
}
#if DEBUGGABLE_GENTREE
GenTreeJitIntrinsic() : GenTreeMultiOp()
{
}
#endif
protected:
GenTreeJitIntrinsic(genTreeOps oper,
var_types type,
IntrinsicNodeBuilder&& nodeBuilder,
CorInfoType simdBaseJitType,
unsigned simdSize)
: GenTreeMultiOp(oper,
type,
nodeBuilder.GetBuiltOperands(),
nodeBuilder.GetOperandCount(),
gtInlineOperands DEBUGARG(false))
, gtLayoutNum(0)
, gtAuxiliaryJitType(CORINFO_TYPE_UNDEF)
, gtOtherReg(REG_NA)
, gtSimdBaseJitType((unsigned char)simdBaseJitType)
, gtSimdSize((unsigned char)simdSize)
, gtHWIntrinsicId(NI_Illegal)
{
assert(gtSimdBaseJitType == simdBaseJitType);
assert(gtSimdSize == simdSize);
}
public:
bool isSIMD() const
{
return gtSimdSize != 0;
}
};
#ifdef FEATURE_SIMD
/* gtSIMD -- SIMD intrinsic (possibly-binary op [NULL op2 is allowed] with additional fields) */
struct GenTreeSIMD : public GenTreeJitIntrinsic
{
GenTreeSIMD(var_types type,
IntrinsicNodeBuilder&& nodeBuilder,
SIMDIntrinsicID simdIntrinsicID,
CorInfoType simdBaseJitType,
unsigned simdSize)
: GenTreeJitIntrinsic(GT_SIMD, type, std::move(nodeBuilder), simdBaseJitType, simdSize)
{
gtSIMDIntrinsicID = simdIntrinsicID;
}
GenTreeSIMD(var_types type,
CompAllocator allocator,
GenTree* op1,
SIMDIntrinsicID simdIntrinsicID,
CorInfoType simdBaseJitType,
unsigned simdSize)
: GenTreeJitIntrinsic(GT_SIMD, type, allocator, simdBaseJitType, simdSize, op1)
{
gtSIMDIntrinsicID = simdIntrinsicID;
}
GenTreeSIMD(var_types type,
CompAllocator allocator,
GenTree* op1,
GenTree* op2,
SIMDIntrinsicID simdIntrinsicID,
CorInfoType simdBaseJitType,
unsigned simdSize)
: GenTreeJitIntrinsic(GT_SIMD, type, allocator, simdBaseJitType, simdSize, op1, op2)
{
gtSIMDIntrinsicID = simdIntrinsicID;
}
#if DEBUGGABLE_GENTREE
GenTreeSIMD() : GenTreeJitIntrinsic()
{
}
#endif
bool OperIsMemoryLoad() const; // Returns true for the SIMD Intrinsic instructions that have MemoryLoad semantics,
// false otherwise
SIMDIntrinsicID GetSIMDIntrinsicId() const
{
return gtSIMDIntrinsicID;
}
static bool Equals(GenTreeSIMD* op1, GenTreeSIMD* op2);
};
#endif // FEATURE_SIMD
#ifdef FEATURE_HW_INTRINSICS
struct GenTreeHWIntrinsic : public GenTreeJitIntrinsic
{
GenTreeHWIntrinsic(var_types type,
IntrinsicNodeBuilder&& nodeBuilder,
NamedIntrinsic hwIntrinsicID,
CorInfoType simdBaseJitType,
unsigned simdSize,
bool isSimdAsHWIntrinsic)
: GenTreeJitIntrinsic(GT_HWINTRINSIC, type, std::move(nodeBuilder), simdBaseJitType, simdSize)
{
SetHWIntrinsicId(hwIntrinsicID);
if (OperIsMemoryStore())
{
gtFlags |= (GTF_GLOB_REF | GTF_ASG);
}
if (isSimdAsHWIntrinsic)
{
gtFlags |= GTF_SIMDASHW_OP;
}
}
template <typename... Operands>
GenTreeHWIntrinsic(var_types type,
CompAllocator allocator,
NamedIntrinsic hwIntrinsicID,
CorInfoType simdBaseJitType,
unsigned simdSize,
bool isSimdAsHWIntrinsic,
Operands... operands)
: GenTreeJitIntrinsic(GT_HWINTRINSIC, type, allocator, simdBaseJitType, simdSize, operands...)
{
SetHWIntrinsicId(hwIntrinsicID);
if ((sizeof...(Operands) > 0) && OperIsMemoryStore())
{
gtFlags |= (GTF_GLOB_REF | GTF_ASG);
}
if (isSimdAsHWIntrinsic)
{
gtFlags |= GTF_SIMDASHW_OP;
}
}
#if DEBUGGABLE_GENTREE
GenTreeHWIntrinsic() : GenTreeJitIntrinsic()
{
}
#endif
bool OperIsMemoryLoad() const; // Returns true for the HW Intrinsic instructions that have MemoryLoad semantics,
// false otherwise
bool OperIsMemoryStore() const; // Returns true for the HW Intrinsic instructions that have MemoryStore semantics,
// false otherwise
bool OperIsMemoryLoadOrStore() const; // Returns true for the HW Intrinsic instructions that have MemoryLoad or
// MemoryStore semantics, false otherwise
bool IsSimdAsHWIntrinsic() const
{
return (gtFlags & GTF_SIMDASHW_OP) != 0;
}
unsigned GetResultOpNumForFMA(GenTree* use, GenTree* op1, GenTree* op2, GenTree* op3);
NamedIntrinsic GetHWIntrinsicId() const;
//---------------------------------------------------------------------------------------
// ChangeHWIntrinsicId: Change the intrinsic id for this node.
//
// This method just sets the intrinsic id, asserting that the new intrinsic
// has the same number of operands as the old one, optionally setting some of
// the new operands. Intrinsics with an unknown number of operands are exempt
// from the "do I have the same number of operands" check, however, so this method must
// be used with care. Use "ResetHWIntrinsicId" if you need to fully reconfigure
// the node for a different intrinsic, with a possibly different number of operands.
//
// Arguments:
// intrinsicId - the new intrinsic id for the node
// operands - optional operands to set while changing the id
//
// Notes:
// It is the caller's responsibility to update side effect flags.
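//
// For example (a hypothetical illustration), a two-operand node can be re-targeted to
// another two-operand intrinsic while keeping its existing operands:
//   node->ChangeHWIntrinsicId(NI_AdvSimd_Add);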
//
template <typename... Operands>
void ChangeHWIntrinsicId(NamedIntrinsic intrinsicId, Operands... operands)
{
const size_t OperandCount = sizeof...(Operands);
assert(OperandCount <= GetOperandCount());
SetHWIntrinsicId(intrinsicId);
GenTree* operandsArray[OperandCount + 1]{operands...};
GenTree** operandsStore = GetOperandArray();
for (size_t i = 0; i < OperandCount; i++)
{
operandsStore[i] = operandsArray[i];
}
}
//---------------------------------------------------------------------------------------
// ResetHWIntrinsicId: Reset the intrinsic id for this node.
//
// This method resets the intrinsic id, fully reconfiguring the node. It must
// be supplied with all the operands the new node needs, and can allocate a
// new dynamic array if the operands do not fit into an inline one, in which
// case a compiler argument is used to get the memory allocator.
//
// This method is similar to "ChangeHWIntrinsicId" but is more versatile and
// thus more expensive. Use it when you need to bash to an intrinsic id with
// a different number of operands than what the original node had, or, which
// is equivalent, when you do not know the original number of operands.
//
// Arguments:
// intrinsicId - the new intrinsic id for the node
// compiler - compiler to allocate memory with, can be "nullptr" if the
// number of new operands does not exceed the length of the
// inline array (so, there are 2 or fewer of them)
// operands - *all* operands for the new node
//
// Notes:
// It is the caller's responsibility to update side effect flags.
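//
// For example (a hypothetical illustration), a node can be bashed to a three-operand
// intrinsic regardless of its original operand count, allocating a larger operand
// array when needed:
//   node->ResetHWIntrinsicId(NI_AdvSimd_FusedMultiplyAdd, compiler, op1, op2, op3);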
//
template <typename... Operands>
void ResetHWIntrinsicId(NamedIntrinsic intrinsicId, Compiler* compiler, Operands... operands)
{
const size_t NewOperandCount = sizeof...(Operands);
assert((compiler != nullptr) || (NewOperandCount <= ArrLen(gtInlineOperands)));
ResetOperandArray(NewOperandCount, compiler, gtInlineOperands, ArrLen(gtInlineOperands));
ChangeHWIntrinsicId(intrinsicId, operands...);
}
void ResetHWIntrinsicId(NamedIntrinsic intrinsicId, GenTree* op1, GenTree* op2)
{
ResetHWIntrinsicId(intrinsicId, static_cast<Compiler*>(nullptr), op1, op2);
}
void ResetHWIntrinsicId(NamedIntrinsic intrinsicId, GenTree* op1)
{
ResetHWIntrinsicId(intrinsicId, static_cast<Compiler*>(nullptr), op1);
}
void ResetHWIntrinsicId(NamedIntrinsic intrinsicId)
{
ResetHWIntrinsicId(intrinsicId, static_cast<Compiler*>(nullptr));
}
static bool Equals(GenTreeHWIntrinsic* op1, GenTreeHWIntrinsic* op2);
private:
void SetHWIntrinsicId(NamedIntrinsic intrinsicId);
};
#endif // FEATURE_HW_INTRINSICS
/* gtIndex -- array access */
struct GenTreeIndex : public GenTreeOp
{
GenTree*& Arr()
{
return gtOp1;
}
GenTree*& Index()
{
return gtOp2;
}
unsigned gtIndElemSize; // size of elements in the array
CORINFO_CLASS_HANDLE gtStructElemClass; // If the element type is a struct, this is the struct type.
GenTreeIndex(var_types type, GenTree* arr, GenTree* ind, unsigned indElemSize)
: GenTreeOp(GT_INDEX, type, arr, ind)
, gtIndElemSize(indElemSize)
, gtStructElemClass(nullptr) // We always initialize this after construction.
{
#ifdef DEBUG
if (JitConfig.JitSkipArrayBoundCheck() == 1)
{
// Skip bounds check
}
else
#endif
{
// Do bounds check
gtFlags |= GTF_INX_RNGCHK;
}
gtFlags |= GTF_EXCEPT | GTF_GLOB_REF;
}
#if DEBUGGABLE_GENTREE
GenTreeIndex() : GenTreeOp()
{
}
#endif
};
// gtIndexAddr: given an array object and an index, checks that the index is within the bounds of the array if
// necessary and produces the address of the value at that index of the array.
struct GenTreeIndexAddr : public GenTreeOp
{
GenTree*& Arr()
{
return gtOp1;
}
GenTree*& Index()
{
return gtOp2;
}
CORINFO_CLASS_HANDLE gtStructElemClass; // If the element type is a struct, this is the struct type.
BasicBlock* gtIndRngFailBB; // Basic block to jump to for array-index-out-of-range
var_types gtElemType; // The element type of the array.
unsigned gtElemSize; // size of elements in the array
unsigned gtLenOffset; // The offset from the array's base address to its length.
unsigned gtElemOffset; // The offset from the array's base address to its first element.
GenTreeIndexAddr(GenTree* arr,
GenTree* ind,
var_types elemType,
CORINFO_CLASS_HANDLE structElemClass,
unsigned elemSize,
unsigned lenOffset,
unsigned elemOffset)
: GenTreeOp(GT_INDEX_ADDR, TYP_BYREF, arr, ind)
, gtStructElemClass(structElemClass)
, gtIndRngFailBB(nullptr)
, gtElemType(elemType)
, gtElemSize(elemSize)
, gtLenOffset(lenOffset)
, gtElemOffset(elemOffset)
{
#ifdef DEBUG
if (JitConfig.JitSkipArrayBoundCheck() == 1)
{
// Skip bounds check
}
else
#endif
{
// Do bounds check
gtFlags |= GTF_INX_RNGCHK;
}
gtFlags |= GTF_EXCEPT | GTF_GLOB_REF;
}
#if DEBUGGABLE_GENTREE
GenTreeIndexAddr() : GenTreeOp()
{
}
#endif
};
/* gtArrLen -- array length (GT_ARR_LENGTH)
GT_ARR_LENGTH is used for "arr.length" */
struct GenTreeArrLen : public GenTreeUnOp
{
GenTree*& ArrRef()
{
return gtOp1;
} // the array address node
private:
int gtArrLenOffset; // constant to add to "gtArrRef" to get the address of the array length.
public:
inline int ArrLenOffset()
{
return gtArrLenOffset;
}
GenTreeArrLen(var_types type, GenTree* arrRef, int lenOffset)
: GenTreeUnOp(GT_ARR_LENGTH, type, arrRef), gtArrLenOffset(lenOffset)
{
}
#if DEBUGGABLE_GENTREE
GenTreeArrLen() : GenTreeUnOp()
{
}
#endif
};
// This takes:
// - a length value
// - an index value, and
// - the label to jump to if the index is out of range.
// - the "kind" of the throw block to branch to on failure
// It generates no result.
//
struct GenTreeBoundsChk : public GenTreeOp
{
BasicBlock* gtIndRngFailBB; // Basic block to jump to for index-out-of-range
SpecialCodeKind gtThrowKind; // Kind of throw block to branch to on failure
GenTreeBoundsChk(GenTree* index, GenTree* length, SpecialCodeKind kind)
: GenTreeOp(GT_BOUNDS_CHECK, TYP_VOID, index, length), gtIndRngFailBB(nullptr), gtThrowKind(kind)
{
gtFlags |= GTF_EXCEPT;
}
#if DEBUGGABLE_GENTREE
GenTreeBoundsChk() : GenTreeOp()
{
}
#endif
// If this check is against GT_ARR_LENGTH, returns array reference, else "NULL".
GenTree* GetArray() const
{
return GetArrayLength()->OperIs(GT_ARR_LENGTH) ? GetArrayLength()->AsArrLen()->ArrRef() : nullptr;
}
// The index expression.
GenTree* GetIndex() const
{
return gtOp1;
}
// An expression for the length.
GenTree* GetArrayLength() const
{
return gtOp2;
}
};
// GenTreeArrElem - bounds checked address (byref) of a general array element,
// for multidimensional arrays, or 1-d arrays with non-zero lower bounds.
//
struct GenTreeArrElem : public GenTree
{
GenTree* gtArrObj;
#define GT_ARR_MAX_RANK 3
GenTree* gtArrInds[GT_ARR_MAX_RANK]; // Indices
unsigned char gtArrRank; // Rank of the array
unsigned char gtArrElemSize; // !!! Caution, this is an "unsigned char", it is used only
// on the optimization path of array intrinsics.
// It stores the size of array elements WHEN it can fit
// into an "unsigned char".
// This has caused VSW 571394.
var_types gtArrElemType; // The array element type
// Requires that "inds" is a pointer to an array of "rank" nodes for the indices.
GenTreeArrElem(
var_types type, GenTree* arr, unsigned char rank, unsigned char elemSize, var_types elemType, GenTree** inds)
: GenTree(GT_ARR_ELEM, type), gtArrObj(arr), gtArrRank(rank), gtArrElemSize(elemSize), gtArrElemType(elemType)
{
gtFlags |= (arr->gtFlags & GTF_ALL_EFFECT);
for (unsigned char i = 0; i < rank; i++)
{
gtArrInds[i] = inds[i];
gtFlags |= (inds[i]->gtFlags & GTF_ALL_EFFECT);
}
gtFlags |= GTF_EXCEPT;
}
#if DEBUGGABLE_GENTREE
GenTreeArrElem() : GenTree()
{
}
#endif
};
//--------------------------------------------
//
// GenTreeArrIndex (gtArrIndex): Expression to bounds-check the index for one dimension of a
// multi-dimensional or non-zero-based array, and compute the effective index
// (i.e. subtracting the lower bound).
//
// Notes:
// This node is similar in some ways to GenTreeBoundsChk, which ONLY performs the check.
// The reason that this node incorporates the check into the effective index computation is
// to avoid duplicating the codegen, as the effective index is required to compute the
// offset anyway.
// TODO-CQ: Enable optimization of the lower bound and length by replacing this:
// /--* <arrObj>
// +--* <index0>
// +--* ArrIndex[i, ]
// with something like:
// /--* <arrObj>
// /--* ArrLowerBound[i, ]
// | /--* <arrObj>
// +--* ArrLen[i, ] (either generalize GT_ARR_LENGTH or add a new node)
// +--* <index0>
// +--* ArrIndex[i, ]
// Which could, for example, be optimized to the following when known to be within bounds:
// /--* TempForLowerBoundDim0
// +--* <index0>
// +--* - (GT_SUB)
//
struct GenTreeArrIndex : public GenTreeOp
{
// The array object - may be any expression producing an Array reference, but is likely to be a lclVar.
GenTree*& ArrObj()
{
return gtOp1;
}
// The index expression - may be any integral expression.
GenTree*& IndexExpr()
{
return gtOp2;
}
unsigned char gtCurrDim; // The current dimension
unsigned char gtArrRank; // Rank of the array
var_types gtArrElemType; // The array element type
GenTreeArrIndex(var_types type,
GenTree* arrObj,
GenTree* indexExpr,
unsigned char currDim,
unsigned char arrRank,
var_types elemType)
: GenTreeOp(GT_ARR_INDEX, type, arrObj, indexExpr)
, gtCurrDim(currDim)
, gtArrRank(arrRank)
, gtArrElemType(elemType)
{
gtFlags |= GTF_EXCEPT;
}
#if DEBUGGABLE_GENTREE
protected:
friend GenTree;
// Used only for GenTree::GetVtableForOper()
GenTreeArrIndex() : GenTreeOp()
{
}
#endif
};
//--------------------------------------------
//
// GenTreeArrOffset (gtArrOffset): Expression to compute the accumulated offset for the address
// of an element of a multi-dimensional or non-zero-based array.
//
// Notes:
// The result of this expression is (gtOffset * dimSize) + gtIndex
// where dimSize is the length/stride/size of the dimension, and is obtained from gtArrObj.
// This node is generated in conjunction with the GenTreeArrIndex node, which computes the
// effective index for a single dimension. The sub-trees can be separately optimized, e.g.
// within a loop body where the expression for the 0th dimension may be invariant.
//
// Here is an example of how the tree might look for a two-dimension array reference:
// /--* const 0
// | /--* <arrObj>
// | +--* <index0>
// +--* ArrIndex[i, ]
// +--* <arrObj>
// /--| arrOffs[i, ]
// | +--* <arrObj>
// | +--* <index1>
// +--* ArrIndex[*,j]
// +--* <arrObj>
// /--| arrOffs[*,j]
// TODO-CQ: see comment on GenTreeArrIndex for how its representation may change. When that
// is done, we will also want to replace the <arrObj> argument to arrOffs with the
// ArrLen as for GenTreeArrIndex.
//
struct GenTreeArrOffs : public GenTree
{
GenTree* gtOffset; // The accumulated offset for lower dimensions - must be TYP_I_IMPL, and
// will either be a CSE temp, the constant 0, or another GenTreeArrOffs node.
GenTree* gtIndex; // The effective index for the current dimension - must be non-negative
// and can be any expression (though it is likely to be either a GenTreeArrIndex,
// node, a lclVar, or a constant).
GenTree* gtArrObj; // The array object - may be any expression producing an Array reference,
// but is likely to be a lclVar.
unsigned char gtCurrDim; // The current dimension
unsigned char gtArrRank; // Rank of the array
var_types gtArrElemType; // The array element type
GenTreeArrOffs(var_types type,
GenTree* offset,
GenTree* index,
GenTree* arrObj,
unsigned char currDim,
unsigned char rank,
var_types elemType)
: GenTree(GT_ARR_OFFSET, type)
, gtOffset(offset)
, gtIndex(index)
, gtArrObj(arrObj)
, gtCurrDim(currDim)
, gtArrRank(rank)
, gtArrElemType(elemType)
{
assert(index->gtFlags & GTF_EXCEPT);
gtFlags |= GTF_EXCEPT;
}
#if DEBUGGABLE_GENTREE
GenTreeArrOffs() : GenTree()
{
}
#endif
};
/* gtAddrMode -- Target-specific canonicalized addressing expression (GT_LEA) */
struct GenTreeAddrMode : public GenTreeOp
{
// Address is Base + Index*Scale + Offset.
// These are the legal patterns:
//
// Base // Base != nullptr && Index == nullptr && Scale == 0 && Offset == 0
// Base + Index*Scale // Base != nullptr && Index != nullptr && Scale != 0 && Offset == 0
// Base + Offset // Base != nullptr && Index == nullptr && Scale == 0 && Offset != 0
// Base + Index*Scale + Offset // Base != nullptr && Index != nullptr && Scale != 0 && Offset != 0
// Index*Scale // Base == nullptr && Index != nullptr && Scale > 1 && Offset == 0
// Index*Scale + Offset // Base == nullptr && Index != nullptr && Scale > 1 && Offset != 0
// Offset // Base == nullptr && Index == nullptr && Scale == 0 && Offset != 0
//
// So, for example:
// 1. Base + Index is legal with Scale==1
// 2. If Index is null, Scale should be zero (or uninitialized / unused)
// 3. If Scale==1, then we should have "Base" instead of "Index*Scale", and "Base + Offset" instead of
// "Index*Scale + Offset".
// First operand is base address/pointer
bool HasBase() const
{
return gtOp1 != nullptr;
}
GenTree*& Base()
{
return gtOp1;
}
void SetBase(GenTree* base)
{
gtOp1 = base;
}
// Second operand is scaled index value
bool HasIndex() const
{
return gtOp2 != nullptr;
}
GenTree*& Index()
{
return gtOp2;
}
void SetIndex(GenTree* index)
{
gtOp2 = index;
}
unsigned GetScale() const
{
return gtScale;
}
void SetScale(unsigned scale)
{
gtScale = scale;
}
int Offset()
{
return static_cast<int>(gtOffset);
}
void SetOffset(int offset)
{
gtOffset = offset;
}
unsigned gtScale; // The scale factor
private:
ssize_t gtOffset; // The offset to add
public:
GenTreeAddrMode(var_types type, GenTree* base, GenTree* index, unsigned scale, ssize_t offset)
: GenTreeOp(GT_LEA, type, base, index)
{
assert(base != nullptr || index != nullptr);
gtScale = scale;
gtOffset = offset;
}
#if DEBUGGABLE_GENTREE
protected:
friend GenTree;
// Used only for GenTree::GetVtableForOper()
GenTreeAddrMode() : GenTreeOp()
{
}
#endif
};
// Indir is just an op: it adds no data beyond GenTreeOp, but provides some additional abstractions.
struct GenTreeIndir : public GenTreeOp
{
// The address for the indirection.
GenTree*& Addr()
{
return gtOp1;
}
void SetAddr(GenTree* addr)
{
assert(addr != nullptr);
assert(addr->TypeIs(TYP_I_IMPL, TYP_BYREF));
gtOp1 = addr;
}
// These methods provide an interface to the indirection's address expression, decomposing it
// into base/index/scale/offset components when the address is an addressing mode (GT_LEA).
bool HasBase();
bool HasIndex();
GenTree* Base();
GenTree* Index();
unsigned Scale();
ssize_t Offset();
GenTreeIndir(genTreeOps oper, var_types type, GenTree* addr, GenTree* data) : GenTreeOp(oper, type, addr, data)
{
}
// True if this indirection is a volatile memory operation.
bool IsVolatile() const
{
return (gtFlags & GTF_IND_VOLATILE) != 0;
}
// True if this indirection is an unaligned memory operation.
bool IsUnaligned() const
{
return (gtFlags & GTF_IND_UNALIGNED) != 0;
}
#if DEBUGGABLE_GENTREE
// Used only for GenTree::GetVtableForOper()
GenTreeIndir() : GenTreeOp()
{
}
#else
// Used by XARCH codegen to construct temporary trees to pass to the emitter.
GenTreeIndir() : GenTreeOp(GT_NOP, TYP_UNDEF)
{
}
#endif
};
// gtBlk -- 'block' (GT_BLK, GT_STORE_BLK).
//
// This is the base type for all of the nodes that represent block or struct
// values.
// Since it can be a store, it includes gtBlkOpKind to specify the type of
// code generation that will be used for the block operation.
struct GenTreeBlk : public GenTreeIndir
{
private:
ClassLayout* m_layout;
public:
ClassLayout* GetLayout() const
{
return m_layout;
}
void SetLayout(ClassLayout* layout)
{
assert((layout != nullptr) || OperIs(GT_STORE_DYN_BLK));
m_layout = layout;
}
// The data to be stored (null for GT_BLK)
GenTree*& Data()
{
return gtOp2;
}
void SetData(GenTree* dataNode)
{
gtOp2 = dataNode;
}
// The size of the buffer to be copied.
unsigned Size() const
{
assert((m_layout != nullptr) || OperIs(GT_STORE_DYN_BLK));
return (m_layout != nullptr) ? m_layout->GetSize() : 0;
}
// Instruction selection: during codegen time, what code sequence we will be using
// to encode this operation.
enum
{
BlkOpKindInvalid,
#ifndef TARGET_X86
BlkOpKindHelper,
#endif
#ifdef TARGET_XARCH
BlkOpKindRepInstr,
#endif
BlkOpKindUnroll,
} gtBlkOpKind;
#ifndef JIT32_GCENCODER
bool gtBlkOpGcUnsafe;
#endif
#ifdef TARGET_XARCH
bool IsOnHeapAndContainsReferences()
{
return (m_layout != nullptr) && m_layout->HasGCPtr() && !Addr()->OperIsLocalAddr();
}
#endif
GenTreeBlk(genTreeOps oper, var_types type, GenTree* addr, ClassLayout* layout)
: GenTreeIndir(oper, type, addr, nullptr)
, m_layout(layout)
, gtBlkOpKind(BlkOpKindInvalid)
#ifndef JIT32_GCENCODER
, gtBlkOpGcUnsafe(false)
#endif
{
assert(OperIsBlk(oper));
assert((layout != nullptr) || OperIs(GT_STORE_DYN_BLK));
gtFlags |= (addr->gtFlags & GTF_ALL_EFFECT);
}
GenTreeBlk(genTreeOps oper, var_types type, GenTree* addr, GenTree* data, ClassLayout* layout)
: GenTreeIndir(oper, type, addr, data)
, m_layout(layout)
, gtBlkOpKind(BlkOpKindInvalid)
#ifndef JIT32_GCENCODER
, gtBlkOpGcUnsafe(false)
#endif
{
assert(OperIsBlk(oper));
assert((layout != nullptr) || OperIs(GT_STORE_DYN_BLK));
gtFlags |= (addr->gtFlags & GTF_ALL_EFFECT);
gtFlags |= (data->gtFlags & GTF_ALL_EFFECT);
}
#if DEBUGGABLE_GENTREE
protected:
friend GenTree;
GenTreeBlk() : GenTreeIndir()
{
}
#endif // DEBUGGABLE_GENTREE
};
// gtObj -- 'object' (GT_OBJ).
//
// This node is used for block values that may have GC pointers.
struct GenTreeObj : public GenTreeBlk
{
void Init()
{
// By default, an OBJ is assumed to be a global reference, unless it is local.
GenTreeLclVarCommon* lcl = Addr()->IsLocalAddrExpr();
if ((lcl == nullptr) || ((lcl->gtFlags & GTF_GLOB_EFFECT) != 0))
{
gtFlags |= GTF_GLOB_REF;
}
noway_assert(GetLayout()->GetClassHandle() != NO_CLASS_HANDLE);
}
GenTreeObj(var_types type, GenTree* addr, ClassLayout* layout) : GenTreeBlk(GT_OBJ, type, addr, layout)
{
Init();
}
GenTreeObj(var_types type, GenTree* addr, GenTree* data, ClassLayout* layout)
: GenTreeBlk(GT_STORE_OBJ, type, addr, data, layout)
{
Init();
}
#if DEBUGGABLE_GENTREE
GenTreeObj() : GenTreeBlk()
{
}
#endif
};
// GenTreeStoreDynBlk -- 'dynamic block store' (GT_STORE_DYN_BLK).
//
// This node is used to represent stores that have a dynamic size - the "cpblk" and "initblk"
// IL instructions are implemented with it. Note that such stores assume the input has no GC
// pointers in it, and as such do not ever use write barriers.
//
// The "Data()" member of this node will either be a "dummy" IND(struct) node, for "cpblk", or
// the zero constant/INIT_VAL for "initblk".
//
struct GenTreeStoreDynBlk : public GenTreeBlk
{
public:
GenTree* gtDynamicSize;
GenTreeStoreDynBlk(GenTree* dstAddr, GenTree* data, GenTree* dynamicSize)
: GenTreeBlk(GT_STORE_DYN_BLK, TYP_VOID, dstAddr, data, nullptr), gtDynamicSize(dynamicSize)
{
// Conservatively the 'dstAddr' could be null or point into the global heap.
// Likewise, this is a store and so must be marked with the GTF_ASG flag.
gtFlags |= (GTF_ASG | GTF_EXCEPT | GTF_GLOB_REF);
gtFlags |= (dynamicSize->gtFlags & GTF_ALL_EFFECT);
}
#if DEBUGGABLE_GENTREE
protected:
friend GenTree;
GenTreeStoreDynBlk() : GenTreeBlk()
{
}
#endif // DEBUGGABLE_GENTREE
};
// Read-modify-write status of a RMW memory op rooted at a storeInd
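// For example, on xarch a tree such as STOREIND(addr, ADD(IND(addr), 1)) may be encoded as a
// single "add [addr], 1" instruction; lowering records below whether the pattern was recognized
// and which operand of the source operation is the memory destination candidate.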
enum RMWStatus
{
STOREIND_RMW_STATUS_UNKNOWN, // RMW status of storeInd unknown
// Default status unless modified by IsRMWMemOpRootedAtStoreInd()
// One of these denote storeind is a RMW memory operation.
STOREIND_RMW_DST_IS_OP1, // StoreInd is known to be a RMW memory op and dst candidate is op1
STOREIND_RMW_DST_IS_OP2, // StoreInd is known to be a RMW memory op and dst candidate is op2
// One of these denote the reason for storeind is marked as non-RMW operation
STOREIND_RMW_UNSUPPORTED_ADDR, // Addr mode is not yet supported for RMW memory
STOREIND_RMW_UNSUPPORTED_OPER, // Operation is not supported for RMW memory
STOREIND_RMW_UNSUPPORTED_TYPE, // Type is not supported for RMW memory
STOREIND_RMW_INDIR_UNEQUAL // Indir to read value is not equivalent to indir that writes the value
};
#ifdef DEBUG
inline const char* RMWStatusDescription(RMWStatus status)
{
switch (status)
{
case STOREIND_RMW_STATUS_UNKNOWN:
return "RMW status unknown";
case STOREIND_RMW_DST_IS_OP1:
return "dst candidate is op1";
case STOREIND_RMW_DST_IS_OP2:
return "dst candidate is op2";
case STOREIND_RMW_UNSUPPORTED_ADDR:
return "address mode is not supported";
case STOREIND_RMW_UNSUPPORTED_OPER:
return "oper is not supported";
case STOREIND_RMW_UNSUPPORTED_TYPE:
return "type is not supported";
case STOREIND_RMW_INDIR_UNEQUAL:
return "read indir is not equivalent to write indir";
default:
unreached();
}
}
#endif
// StoreInd is just a BinOp, with additional RMW status
struct GenTreeStoreInd : public GenTreeIndir
{
#if !CPU_LOAD_STORE_ARCH
// The below flag is set and used during lowering
RMWStatus gtRMWStatus;
bool IsRMWStatusUnknown()
{
return gtRMWStatus == STOREIND_RMW_STATUS_UNKNOWN;
}
bool IsNonRMWMemoryOp()
{
return gtRMWStatus == STOREIND_RMW_UNSUPPORTED_ADDR || gtRMWStatus == STOREIND_RMW_UNSUPPORTED_OPER ||
gtRMWStatus == STOREIND_RMW_UNSUPPORTED_TYPE || gtRMWStatus == STOREIND_RMW_INDIR_UNEQUAL;
}
bool IsRMWMemoryOp()
{
return gtRMWStatus == STOREIND_RMW_DST_IS_OP1 || gtRMWStatus == STOREIND_RMW_DST_IS_OP2;
}
bool IsRMWDstOp1()
{
return gtRMWStatus == STOREIND_RMW_DST_IS_OP1;
}
bool IsRMWDstOp2()
{
return gtRMWStatus == STOREIND_RMW_DST_IS_OP2;
}
#endif //! CPU_LOAD_STORE_ARCH
RMWStatus GetRMWStatus()
{
#if !CPU_LOAD_STORE_ARCH
return gtRMWStatus;
#else
return STOREIND_RMW_STATUS_UNKNOWN;
#endif
}
void SetRMWStatusDefault()
{
#if !CPU_LOAD_STORE_ARCH
gtRMWStatus = STOREIND_RMW_STATUS_UNKNOWN;
#endif
}
void SetRMWStatus(RMWStatus status)
{
#if !CPU_LOAD_STORE_ARCH
gtRMWStatus = status;
#endif
}
GenTree*& Data()
{
return gtOp2;
}
GenTreeStoreInd(var_types type, GenTree* destPtr, GenTree* data) : GenTreeIndir(GT_STOREIND, type, destPtr, data)
{
SetRMWStatusDefault();
}
#if DEBUGGABLE_GENTREE
protected:
friend GenTree;
// Used only for GenTree::GetVtableForOper()
GenTreeStoreInd() : GenTreeIndir()
{
SetRMWStatusDefault();
}
#endif
};
/* gtRetExp -- Place holder for the return expression from an inline candidate (GT_RET_EXPR) */
struct GenTreeRetExpr : public GenTree
{
GenTree* gtInlineCandidate;
BasicBlockFlags bbFlags;
CORINFO_CLASS_HANDLE gtRetClsHnd;
GenTreeRetExpr(var_types type) : GenTree(GT_RET_EXPR, type)
{
}
#if DEBUGGABLE_GENTREE
GenTreeRetExpr() : GenTree()
{
}
#endif
};
// In LIR there are no longer statements so debug information is inserted linearly using these nodes.
struct GenTreeILOffset : public GenTree
{
DebugInfo gtStmtDI; // debug info
#ifdef DEBUG
IL_OFFSET gtStmtLastILoffs; // instr offset at end of stmt
#endif
GenTreeILOffset(const DebugInfo& di DEBUGARG(IL_OFFSET lastOffset = BAD_IL_OFFSET))
: GenTree(GT_IL_OFFSET, TYP_VOID)
, gtStmtDI(di)
#ifdef DEBUG
, gtStmtLastILoffs(lastOffset)
#endif
{
}
#if DEBUGGABLE_GENTREE
GenTreeILOffset() : GenTree(GT_IL_OFFSET, TYP_VOID)
{
}
#endif
};
// GenTreeList: adapter class for forward iteration of the execution order GenTree linked list
// using range-based `for`, normally used via Statement::TreeList(), e.g.:
// for (GenTree* const tree : stmt->TreeList()) ...
//
class GenTreeList
{
GenTree* m_trees;
// Forward iterator for the execution order GenTree linked list (using `gtNext` pointer).
//
class iterator
{
GenTree* m_tree;
public:
iterator(GenTree* tree) : m_tree(tree)
{
}
GenTree* operator*() const
{
return m_tree;
}
iterator& operator++()
{
m_tree = m_tree->gtNext;
return *this;
}
bool operator!=(const iterator& i) const
{
return m_tree != i.m_tree;
}
};
public:
GenTreeList(GenTree* trees) : m_trees(trees)
{
}
iterator begin() const
{
return iterator(m_trees);
}
iterator end() const
{
return iterator(nullptr);
}
};
// We use the following format when printing the Statement number: Statement->GetID()
// This define is used with string concatenation to put this in printf format strings (Note that %u means unsigned int)
#define FMT_STMT "STMT%05u"
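// For example: printf("Removing " FMT_STMT "\n", stmt->GetID());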
struct Statement
{
public:
Statement(GenTree* expr DEBUGARG(unsigned stmtID))
: m_rootNode(expr)
, m_treeList(nullptr)
, m_next(nullptr)
, m_prev(nullptr)
#ifdef DEBUG
, m_lastILOffset(BAD_IL_OFFSET)
, m_stmtID(stmtID)
#endif
{
}
GenTree* GetRootNode() const
{
return m_rootNode;
}
GenTree** GetRootNodePointer()
{
return &m_rootNode;
}
void SetRootNode(GenTree* treeRoot)
{
m_rootNode = treeRoot;
}
GenTree* GetTreeList() const
{
return m_treeList;
}
void SetTreeList(GenTree* treeHead)
{
m_treeList = treeHead;
}
// TreeList: convenience method for enabling range-based `for` iteration over the
// execution order of the GenTree linked list, e.g.:
// for (GenTree* const tree : stmt->TreeList()) ...
//
GenTreeList TreeList() const
{
return GenTreeList(GetTreeList());
}
const DebugInfo& GetDebugInfo() const
{
return m_debugInfo;
}
void SetDebugInfo(const DebugInfo& di)
{
m_debugInfo = di;
di.Validate();
}
#ifdef DEBUG
IL_OFFSET GetLastILOffset() const
{
return m_lastILOffset;
}
void SetLastILOffset(IL_OFFSET lastILOffset)
{
m_lastILOffset = lastILOffset;
}
unsigned GetID() const
{
return m_stmtID;
}
#endif // DEBUG
Statement* GetNextStmt() const
{
return m_next;
}
void SetNextStmt(Statement* nextStmt)
{
m_next = nextStmt;
}
Statement* GetPrevStmt() const
{
return m_prev;
}
void SetPrevStmt(Statement* prevStmt)
{
m_prev = prevStmt;
}
bool IsPhiDefnStmt() const
{
return m_rootNode->IsPhiDefn();
}
unsigned char GetCostSz() const
{
return m_rootNode->GetCostSz();
}
unsigned char GetCostEx() const
{
return m_rootNode->GetCostEx();
}
private:
// The root of the expression tree.
// Note: It will be the last node in evaluation order.
GenTree* m_rootNode;
// The tree list head (for forward walks in evaluation order).
// The value is `nullptr` until we have set the sequencing of the nodes.
GenTree* m_treeList;
// The statement nodes are doubly-linked. The first statement node in a block points
// to the last node in the block via its `m_prev` link. Note that the last statement node
// does not point to the first: it has `m_next == nullptr`; that is, the list is not fully circular.
Statement* m_next;
Statement* m_prev;
DebugInfo m_debugInfo;
#ifdef DEBUG
IL_OFFSET m_lastILOffset; // The instr offset at the end of this statement.
unsigned m_stmtID;
#endif
};
// StatementList: adapter class for forward iteration of the statement linked list using range-based `for`,
// normally used via BasicBlock::Statements(), e.g.:
// for (Statement* const stmt : block->Statements()) ...
// or:
// for (Statement* const stmt : block->NonPhiStatements()) ...
//
class StatementList
{
Statement* m_stmts;
// Forward iterator for the statement linked list.
//
class iterator
{
Statement* m_stmt;
public:
iterator(Statement* stmt) : m_stmt(stmt)
{
}
Statement* operator*() const
{
return m_stmt;
}
iterator& operator++()
{
m_stmt = m_stmt->GetNextStmt();
return *this;
}
bool operator!=(const iterator& i) const
{
return m_stmt != i.m_stmt;
}
};
public:
StatementList(Statement* stmts) : m_stmts(stmts)
{
}
iterator begin() const
{
return iterator(m_stmts);
}
iterator end() const
{
return iterator(nullptr);
}
};
/* NOTE: Any tree nodes that are larger than 8 bytes (two ints or
pointers) must be flagged as 'large' in GenTree::InitNodeSize().
*/
/* AsClsVar() -- 'static data member' (GT_CLS_VAR) */
struct GenTreeClsVar : public GenTree
{
CORINFO_FIELD_HANDLE gtClsVarHnd;
FieldSeqNode* gtFieldSeq;
GenTreeClsVar(var_types type, CORINFO_FIELD_HANDLE clsVarHnd, FieldSeqNode* fldSeq)
: GenTree(GT_CLS_VAR, type), gtClsVarHnd(clsVarHnd), gtFieldSeq(fldSeq)
{
gtFlags |= GTF_GLOB_REF;
}
GenTreeClsVar(genTreeOps oper, var_types type, CORINFO_FIELD_HANDLE clsVarHnd, FieldSeqNode* fldSeq)
: GenTree(oper, type), gtClsVarHnd(clsVarHnd), gtFieldSeq(fldSeq)
{
assert((oper == GT_CLS_VAR) || (oper == GT_CLS_VAR_ADDR));
gtFlags |= GTF_GLOB_REF;
}
#if DEBUGGABLE_GENTREE
GenTreeClsVar() : GenTree()
{
}
#endif
};
/* gtArgPlace -- 'register argument placeholder' (GT_ARGPLACE) */
struct GenTreeArgPlace : public GenTree
{
CORINFO_CLASS_HANDLE gtArgPlaceClsHnd; // Needed when we have a TYP_STRUCT argument
GenTreeArgPlace(var_types type, CORINFO_CLASS_HANDLE clsHnd) : GenTree(GT_ARGPLACE, type), gtArgPlaceClsHnd(clsHnd)
{
}
#if DEBUGGABLE_GENTREE
GenTreeArgPlace() : GenTree()
{
}
#endif
};
/* gtPhiArg -- phi node rhs argument, var = phi(phiarg, phiarg, phiarg...); GT_PHI_ARG */
struct GenTreePhiArg : public GenTreeLclVarCommon
{
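// The predecessor block corresponding to this phi argument; the value flows in along
// the edge from this block.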
BasicBlock* gtPredBB;
GenTreePhiArg(var_types type, unsigned lclNum, unsigned ssaNum, BasicBlock* block)
: GenTreeLclVarCommon(GT_PHI_ARG, type, lclNum), gtPredBB(block)
{
SetSsaNum(ssaNum);
}
#if DEBUGGABLE_GENTREE
GenTreePhiArg() : GenTreeLclVarCommon()
{
}
#endif
};
/* gtPutArgStk -- Argument passed on stack (GT_PUTARG_STK) */
struct GenTreePutArgStk : public GenTreeUnOp
{
private:
unsigned m_byteOffset;
#ifdef FEATURE_PUT_STRUCT_ARG_STK
unsigned m_byteSize; // The number of bytes that this argument is occupying on the stack with padding.
#endif
public:
#if defined(DEBUG_ARG_SLOTS)
unsigned gtSlotNum; // Slot number of the argument to be passed on stack
#if defined(FEATURE_PUT_STRUCT_ARG_STK)
unsigned gtNumSlots; // Number of slots for the argument to be passed on stack
#endif
#endif
#if defined(UNIX_X86_ABI)
unsigned gtPadAlign; // Number of padding slots for stack alignment
#endif
#if defined(DEBUG) || defined(UNIX_X86_ABI)
GenTreeCall* gtCall; // the call node to which this argument belongs
#endif
#if FEATURE_FASTTAILCALL
bool gtPutInIncomingArgArea; // Whether this arg needs to be placed in incoming arg area.
// By default this is false and will be placed in out-going arg area.
// Fast tail calls set this to true.
// In future if we need to add more such bool fields consider bit fields.
#endif
#ifdef FEATURE_PUT_STRUCT_ARG_STK
// Instruction selection: during codegen time, what code sequence we will be using
// to encode this operation.
// TODO-Throughput: The following information should be obtained from the child
// block node.
enum class Kind : __int8{
Invalid, RepInstr, PartialRepInstr, Unroll, Push, PushAllSlots,
};
Kind gtPutArgStkKind;
#endif
GenTreePutArgStk(genTreeOps oper,
var_types type,
GenTree* op1,
unsigned stackByteOffset,
#if defined(FEATURE_PUT_STRUCT_ARG_STK)
unsigned stackByteSize,
#endif
#if defined(DEBUG_ARG_SLOTS)
unsigned slotNum,
#if defined(FEATURE_PUT_STRUCT_ARG_STK)
unsigned numSlots,
#endif
#endif
GenTreeCall* callNode,
bool putInIncomingArgArea)
: GenTreeUnOp(oper, type, op1 DEBUGARG(/*largeNode*/ false))
, m_byteOffset(stackByteOffset)
#if defined(FEATURE_PUT_STRUCT_ARG_STK)
, m_byteSize(stackByteSize)
#endif
#if defined(DEBUG_ARG_SLOTS)
, gtSlotNum(slotNum)
#if defined(FEATURE_PUT_STRUCT_ARG_STK)
, gtNumSlots(numSlots)
#endif
#endif
#if defined(UNIX_X86_ABI)
, gtPadAlign(0)
#endif
#if defined(DEBUG) || defined(UNIX_X86_ABI)
, gtCall(callNode)
#endif
#if FEATURE_FASTTAILCALL
, gtPutInIncomingArgArea(putInIncomingArgArea)
#endif // FEATURE_FASTTAILCALL
#if defined(FEATURE_PUT_STRUCT_ARG_STK)
, gtPutArgStkKind(Kind::Invalid)
#endif
{
DEBUG_ARG_SLOTS_ASSERT(m_byteOffset == slotNum * TARGET_POINTER_SIZE);
#if defined(FEATURE_PUT_STRUCT_ARG_STK)
DEBUG_ARG_SLOTS_ASSERT(m_byteSize == gtNumSlots * TARGET_POINTER_SIZE);
#endif
}
GenTree*& Data()
{
return gtOp1;
}
#if FEATURE_FASTTAILCALL
bool putInIncomingArgArea() const
{
return gtPutInIncomingArgArea;
}
#else // !FEATURE_FASTTAILCALL
bool putInIncomingArgArea() const
{
return false;
}
#endif // !FEATURE_FASTTAILCALL
unsigned getArgOffset() const
{
DEBUG_ARG_SLOTS_ASSERT(m_byteOffset / TARGET_POINTER_SIZE == gtSlotNum);
DEBUG_ARG_SLOTS_ASSERT(m_byteOffset % TARGET_POINTER_SIZE == 0);
return m_byteOffset;
}
#if defined(UNIX_X86_ABI)
unsigned getArgPadding() const
{
return gtPadAlign;
}
void setArgPadding(unsigned padAlign)
{
gtPadAlign = padAlign;
}
#endif
#ifdef FEATURE_PUT_STRUCT_ARG_STK
unsigned GetStackByteSize() const
{
return m_byteSize;
}
// Return true if this is a PutArgStk of a SIMD12 struct.
// This is needed because such values are re-typed to SIMD16, and the type of PutArgStk is VOID.
unsigned isSIMD12() const
{
return (varTypeIsSIMD(gtOp1) && (GetStackByteSize() == 12));
}
bool isPushKind() const
{
return (gtPutArgStkKind == Kind::Push) || (gtPutArgStkKind == Kind::PushAllSlots);
}
#else // !FEATURE_PUT_STRUCT_ARG_STK
unsigned GetStackByteSize() const;
#endif // !FEATURE_PUT_STRUCT_ARG_STK
#if DEBUGGABLE_GENTREE
GenTreePutArgStk() : GenTreeUnOp()
{
}
#endif
};
#if FEATURE_ARG_SPLIT
// Represent the struct argument: split value in register(s) and stack
struct GenTreePutArgSplit : public GenTreePutArgStk
{
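// Number of registers in which the register portion of this split argument is passed.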
unsigned gtNumRegs;
GenTreePutArgSplit(GenTree* op1,
unsigned stackByteOffset,
#if defined(FEATURE_PUT_STRUCT_ARG_STK)
unsigned stackByteSize,
#endif
#if defined(DEBUG_ARG_SLOTS)
unsigned slotNum,
#if defined(FEATURE_PUT_STRUCT_ARG_STK)
unsigned numSlots,
#endif
#endif
unsigned numRegs,
GenTreeCall* callNode,
bool putIncomingArgArea)
: GenTreePutArgStk(GT_PUTARG_SPLIT,
TYP_STRUCT,
op1,
stackByteOffset,
#if defined(FEATURE_PUT_STRUCT_ARG_STK)
stackByteSize,
#endif
#if defined(DEBUG_ARG_SLOTS)
slotNum,
#if defined(FEATURE_PUT_STRUCT_ARG_STK)
numSlots,
#endif
#endif
callNode,
putIncomingArgArea)
, gtNumRegs(numRegs)
{
ClearOtherRegs();
ClearOtherRegFlags();
}
// Type required to support multi-reg struct arg.
var_types m_regType[MAX_REG_ARG];
// First reg of struct is always given by GetRegNum().
// gtOtherRegs holds the other reg numbers of struct.
regNumberSmall gtOtherRegs[MAX_REG_ARG - 1];
MultiRegSpillFlags gtSpillFlags;
//---------------------------------------------------------------------------
// GetRegNumByIdx: get i'th register allocated to this struct argument.
//
// Arguments:
// idx - index of the register within this struct argument
//
// Return Value:
// Return regNumber of i'th register of this struct argument
//
regNumber GetRegNumByIdx(unsigned idx) const
{
assert(idx < MAX_REG_ARG);
if (idx == 0)
{
return GetRegNum();
}
return (regNumber)gtOtherRegs[idx - 1];
}
//----------------------------------------------------------------------
// SetRegNumByIdx: set i'th register of this struct argument
//
// Arguments:
// reg - reg number
// idx - index of the register within this struct argument
//
// Return Value:
// None
//
void SetRegNumByIdx(regNumber reg, unsigned idx)
{
assert(idx < MAX_REG_ARG);
if (idx == 0)
{
SetRegNum(reg);
}
else
{
gtOtherRegs[idx - 1] = (regNumberSmall)reg;
assert(gtOtherRegs[idx - 1] == reg);
}
}
//----------------------------------------------------------------------------
// ClearOtherRegs: clear multi-reg state to indicate no regs are allocated
//
// Arguments:
// None
//
// Return Value:
// None
//
void ClearOtherRegs()
{
for (unsigned i = 0; i < MAX_REG_ARG - 1; ++i)
{
gtOtherRegs[i] = REG_NA;
}
}
GenTreeFlags GetRegSpillFlagByIdx(unsigned idx) const
{
return GetMultiRegSpillFlagsByIdx(gtSpillFlags, idx);
}
void SetRegSpillFlagByIdx(GenTreeFlags flags, unsigned idx)
{
#if FEATURE_MULTIREG_RET
gtSpillFlags = SetMultiRegSpillFlagsByIdx(gtSpillFlags, flags, idx);
#endif
}
//--------------------------------------------------------------------------
// GetRegType: Get var_type of the register specified by index.
//
// Arguments:
// index - Index of the register.
// First register will have an index 0 and so on.
//
// Return Value:
// var_type of the register specified by its index.
var_types GetRegType(unsigned index) const
{
assert(index < gtNumRegs);
var_types result = m_regType[index];
return result;
}
//-------------------------------------------------------------------
// clearOtherRegFlags: clear GTF_* flags associated with gtOtherRegs
//
// Arguments:
// None
//
// Return Value:
// None
//
void ClearOtherRegFlags()
{
gtSpillFlags = 0;
}
#if DEBUGGABLE_GENTREE
GenTreePutArgSplit() : GenTreePutArgStk()
{
}
#endif
};
#endif // FEATURE_ARG_SPLIT
// Represents GT_COPY or GT_RELOAD node
//
// As it turns out, these are only needed on targets that happen to have multi-reg returns.
// However, they are actually needed on any target that has any multi-reg ops. It is just
// coincidence that those are the same (and there isn't a FEATURE_MULTIREG_OPS).
//
struct GenTreeCopyOrReload : public GenTreeUnOp
{
#if FEATURE_MULTIREG_RET
// State required to support copy/reload of a multi-reg call node.
// The first register is always given by GetRegNum().
//
regNumberSmall gtOtherRegs[MAX_RET_REG_COUNT - 1];
#endif
//----------------------------------------------------------
// ClearOtherRegs: set gtOtherRegs to REG_NA.
//
// Arguments:
// None
//
// Return Value:
// None
//
void ClearOtherRegs()
{
#if FEATURE_MULTIREG_RET
for (unsigned i = 0; i < MAX_RET_REG_COUNT - 1; ++i)
{
gtOtherRegs[i] = REG_NA;
}
#endif
}
//-----------------------------------------------------------
// GetRegNumByIdx: Get regNumber of i'th position.
//
// Arguments:
// idx - register position.
//
// Return Value:
// Returns regNumber assigned to i'th position.
//
regNumber GetRegNumByIdx(unsigned idx) const
{
assert(idx < MAX_RET_REG_COUNT);
if (idx == 0)
{
return GetRegNum();
}
#if FEATURE_MULTIREG_RET
return (regNumber)gtOtherRegs[idx - 1];
#else
return REG_NA;
#endif
}
//-----------------------------------------------------------
// SetRegNumByIdx: Set the regNumber for i'th position.
//
// Arguments:
// reg - reg number
// idx - register position.
//
// Return Value:
// None.
//
void SetRegNumByIdx(regNumber reg, unsigned idx)
{
assert(idx < MAX_RET_REG_COUNT);
if (idx == 0)
{
SetRegNum(reg);
}
#if FEATURE_MULTIREG_RET
else
{
gtOtherRegs[idx - 1] = (regNumberSmall)reg;
assert(gtOtherRegs[idx - 1] == reg);
}
#else
else
{
unreached();
}
#endif
}
//----------------------------------------------------------------------------
// CopyOtherRegs: copy multi-reg state from the given copy/reload node to this
// node.
//
// Arguments:
// from - GenTree node from which to copy multi-reg state
//
// Return Value:
// None
//
// TODO-ARM: Implement this routine for Arm64 and Arm32
// TODO-X86: Implement this routine for x86
void CopyOtherRegs(GenTreeCopyOrReload* from)
{
assert(OperGet() == from->OperGet());
#ifdef UNIX_AMD64_ABI
for (unsigned i = 0; i < MAX_RET_REG_COUNT - 1; ++i)
{
gtOtherRegs[i] = from->gtOtherRegs[i];
}
#endif
}
unsigned GetRegCount() const
{
#if FEATURE_MULTIREG_RET
// We need to return the highest index for which we have a valid register.
// Note that the gtOtherRegs array is off by one (the 0th register is GetRegNum()).
// If there's no valid register in gtOtherRegs, GetRegNum() must be valid.
// Note that for most nodes, the set of valid registers must be contiguous,
// but for COPY or RELOAD there is only a valid register for the register positions
// that must be copied or reloaded.
//
for (unsigned i = MAX_RET_REG_COUNT; i > 1; i--)
{
if (gtOtherRegs[i - 2] != REG_NA)
{
return i;
}
}
#endif
// We should never have a COPY or RELOAD with no valid registers.
assert(GetRegNum() != REG_NA);
return 1;
}
GenTreeCopyOrReload(genTreeOps oper, var_types type, GenTree* op1) : GenTreeUnOp(oper, type, op1)
{
assert(type != TYP_STRUCT || op1->IsMultiRegNode());
SetRegNum(REG_NA);
ClearOtherRegs();
}
#if DEBUGGABLE_GENTREE
GenTreeCopyOrReload() : GenTreeUnOp()
{
}
#endif
};
// Represents GT_ALLOCOBJ node
struct GenTreeAllocObj final : public GenTreeUnOp
{
unsigned int gtNewHelper; // Value returned by ICorJitInfo::getNewHelper
bool gtHelperHasSideEffects;
CORINFO_CLASS_HANDLE gtAllocObjClsHnd;
#ifdef FEATURE_READYTORUN
CORINFO_CONST_LOOKUP gtEntryPoint;
#endif
GenTreeAllocObj(
var_types type, unsigned int helper, bool helperHasSideEffects, CORINFO_CLASS_HANDLE clsHnd, GenTree* op)
: GenTreeUnOp(GT_ALLOCOBJ, type, op DEBUGARG(/*largeNode*/ TRUE))
, // This node in most cases will be changed to a call node
gtNewHelper(helper)
, gtHelperHasSideEffects(helperHasSideEffects)
, gtAllocObjClsHnd(clsHnd)
{
#ifdef FEATURE_READYTORUN
gtEntryPoint.addr = nullptr;
#endif
}
#if DEBUGGABLE_GENTREE
GenTreeAllocObj() : GenTreeUnOp()
{
}
#endif
};
// Represents GT_RUNTIMELOOKUP node
struct GenTreeRuntimeLookup final : public GenTreeUnOp
{
CORINFO_GENERIC_HANDLE gtHnd;
CorInfoGenericHandleType gtHndType;
GenTreeRuntimeLookup(CORINFO_GENERIC_HANDLE hnd, CorInfoGenericHandleType hndTyp, GenTree* tree)
: GenTreeUnOp(GT_RUNTIMELOOKUP, tree->gtType, tree DEBUGARG(/*largeNode*/ FALSE)), gtHnd(hnd), gtHndType(hndTyp)
{
assert(hnd != nullptr);
}
#if DEBUGGABLE_GENTREE
GenTreeRuntimeLookup() : GenTreeUnOp()
{
}
#endif
// Return reference to the actual tree that does the lookup
GenTree*& Lookup()
{
return gtOp1;
}
bool IsClassHandle() const
{
return gtHndType == CORINFO_HANDLETYPE_CLASS;
}
bool IsMethodHandle() const
{
return gtHndType == CORINFO_HANDLETYPE_METHOD;
}
bool IsFieldHandle() const
{
return gtHndType == CORINFO_HANDLETYPE_FIELD;
}
// Note these operations describe the handle that is input to the
// lookup, not the handle produced by the lookup.
CORINFO_CLASS_HANDLE GetClassHandle() const
{
assert(IsClassHandle());
return (CORINFO_CLASS_HANDLE)gtHnd;
}
CORINFO_METHOD_HANDLE GetMethodHandle() const
{
assert(IsMethodHandle());
return (CORINFO_METHOD_HANDLE)gtHnd;
}
CORINFO_FIELD_HANDLE GetFieldHandle() const
{
assert(IsFieldHandle());
return (CORINFO_FIELD_HANDLE)gtHnd;
}
};
// Represents the condition of a GT_JCC or GT_SETCC node.
struct GenCondition
{
// clang-format off
enum Code : unsigned char
{
OperMask = 7,
Unsigned = 8,
Unordered = Unsigned,
Float = 16,
// 0 would be the encoding of "signed EQ" but since equality is sign insensitive
// we'll use 0 as invalid/uninitialized condition code. This will also leave 1
// as a spare code.
NONE = 0,
SLT = 2,
SLE = 3,
SGE = 4,
SGT = 5,
S = 6,
NS = 7,
EQ = Unsigned | 0, // = 8
NE = Unsigned | 1, // = 9
ULT = Unsigned | SLT, // = 10
ULE = Unsigned | SLE, // = 11
UGE = Unsigned | SGE, // = 12
UGT = Unsigned | SGT, // = 13
C = Unsigned | S, // = 14
NC = Unsigned | NS, // = 15
FEQ = Float | 0, // = 16
FNE = Float | 1, // = 17
FLT = Float | SLT, // = 18
FLE = Float | SLE, // = 19
FGE = Float | SGE, // = 20
FGT = Float | SGT, // = 21
O = Float | S, // = 22
NO = Float | NS, // = 23
FEQU = Unordered | FEQ, // = 24
FNEU = Unordered | FNE, // = 25
FLTU = Unordered | FLT, // = 26
FLEU = Unordered | FLE, // = 27
FGEU = Unordered | FGE, // = 28
FGTU = Unordered | FGT, // = 29
P = Unordered | O, // = 30
NP = Unordered | NO, // = 31
};
// clang-format on
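// For example, ULT is encoded as Unsigned | SLT (8 | 2 = 10) and FLTU as
// Unordered | Float | SLT (8 | 16 | 2 = 26); the low three bits (OperMask) always
// hold the base relation.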
private:
Code m_code;
public:
Code GetCode() const
{
return m_code;
}
bool IsFlag() const
{
return (m_code & OperMask) >= S;
}
bool IsUnsigned() const
{
return (ULT <= m_code) && (m_code <= UGT);
}
bool IsFloat() const
{
return !IsFlag() && (m_code & Float) != 0;
}
bool IsUnordered() const
{
return !IsFlag() && (m_code & (Float | Unordered)) == (Float | Unordered);
}
bool Is(Code cond) const
{
return m_code == cond;
}
template <typename... TRest>
bool Is(Code c, TRest... rest) const
{
return Is(c) || Is(rest...);
}
// Indicate whether the condition should be swapped in order to avoid generating
// multiple branches. This happens for certain floating point conditions on XARCH,
// see GenConditionDesc and its associated mapping table for more details.
bool PreferSwap() const
{
#ifdef TARGET_XARCH
return Is(GenCondition::FLT, GenCondition::FLE, GenCondition::FGTU, GenCondition::FGEU);
#else
return false;
#endif
}
const char* Name() const
{
// clang-format off
static const char* names[]
{
"NONE", "???", "SLT", "SLE", "SGE", "SGT", "S", "NS",
"UEQ", "UNE", "ULT", "ULE", "UGE", "UGT", "C", "NC",
"FEQ", "FNE", "FLT", "FLE", "FGE", "FGT", "O", "NO",
"FEQU", "FNEU", "FLTU", "FLEU", "FGEU", "FGTU", "P", "NP"
};
// clang-format on
assert(m_code < ArrLen(names));
return names[m_code];
}
GenCondition() : m_code()
{
}
GenCondition(Code cond) : m_code(cond)
{
}
static_assert((GT_NE - GT_EQ) == (NE & ~Unsigned), "bad relop");
static_assert((GT_LT - GT_EQ) == SLT, "bad relop");
static_assert((GT_LE - GT_EQ) == SLE, "bad relop");
static_assert((GT_GE - GT_EQ) == SGE, "bad relop");
static_assert((GT_GT - GT_EQ) == SGT, "bad relop");
static_assert((GT_TEST_NE - GT_TEST_EQ) == (NE & ~Unsigned), "bad relop");
static GenCondition FromRelop(GenTree* relop)
{
assert(relop->OperIsCompare());
if (varTypeIsFloating(relop->gtGetOp1()))
{
return FromFloatRelop(relop);
}
else
{
return FromIntegralRelop(relop);
}
}
static GenCondition FromFloatRelop(GenTree* relop)
{
assert(varTypeIsFloating(relop->gtGetOp1()) && varTypeIsFloating(relop->gtGetOp2()));
return FromFloatRelop(relop->OperGet(), (relop->gtFlags & GTF_RELOP_NAN_UN) != 0);
}
static GenCondition FromFloatRelop(genTreeOps oper, bool isUnordered)
{
assert(GenTree::OperIsCompare(oper));
unsigned code = oper - GT_EQ;
assert(code <= SGT);
code |= Float;
if (isUnordered)
{
code |= Unordered;
}
return GenCondition(static_cast<Code>(code));
}
static GenCondition FromIntegralRelop(GenTree* relop)
{
assert(!varTypeIsFloating(relop->gtGetOp1()) && !varTypeIsFloating(relop->gtGetOp2()));
return FromIntegralRelop(relop->OperGet(), relop->IsUnsigned());
}
static GenCondition FromIntegralRelop(genTreeOps oper, bool isUnsigned)
{
assert(GenTree::OperIsCompare(oper));
// GT_TEST_EQ/NE are special, they need to be mapped as GT_EQ/NE
unsigned code = oper - ((oper >= GT_TEST_EQ) ? GT_TEST_EQ : GT_EQ);
if (isUnsigned || (code <= 1)) // EQ/NE are treated as unsigned
{
code |= Unsigned;
}
return GenCondition(static_cast<Code>(code));
}
static GenCondition Reverse(GenCondition condition)
{
// clang-format off
static const Code reverse[]
{
// EQ NE LT LE GE GT F NF
NONE, NONE, SGE, SGT, SLT, SLE, NS, S,
NE, EQ, UGE, UGT, ULT, ULE, NC, C,
FNEU, FEQU, FGEU, FGTU, FLTU, FLEU, NO, O,
FNE, FEQ, FGE, FGT, FLT, FLE, NP, P
};
// clang-format on
assert(condition.m_code < ArrLen(reverse));
return GenCondition(reverse[condition.m_code]);
}
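// For example, Reverse(FLT) is FGEU: the logical negation of an ordered "a < b" must also
// hold when the operands are unordered (NaN).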
static GenCondition Swap(GenCondition condition)
{
// clang-format off
static const Code swap[]
{
// EQ NE LT LE GE GT F NF
NONE, NONE, SGT, SGE, SLE, SLT, S, NS,
EQ, NE, UGT, UGE, ULE, ULT, C, NC,
FEQ, FNE, FGT, FGE, FLE, FLT, O, NO,
FEQU, FNEU, FGTU, FGEU, FLEU, FLTU, P, NP
};
// clang-format on
assert(condition.m_code < ArrLen(swap));
return GenCondition(swap[condition.m_code]);
}
};
// Represents a GT_JCC or GT_SETCC node.
struct GenTreeCC final : public GenTree
{
GenCondition gtCondition;
GenTreeCC(genTreeOps oper, GenCondition condition, var_types type = TYP_VOID)
: GenTree(oper, type DEBUGARG(/*largeNode*/ FALSE)), gtCondition(condition)
{
assert(OperIs(GT_JCC, GT_SETCC));
}
#if DEBUGGABLE_GENTREE
GenTreeCC() : GenTree()
{
}
#endif // DEBUGGABLE_GENTREE
};
//------------------------------------------------------------------------
// Deferred inline functions of GenTree -- these need the subtypes above to
// be defined already.
//------------------------------------------------------------------------
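// OperIsBlkOp: returns true if this node represents a block operation -- either a
// struct-typed assignment (GT_ASG) or a store-block oper (GT_STORE_BLK, GT_STORE_OBJ,
// GT_STORE_DYN_BLK).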
inline bool GenTree::OperIsBlkOp()
{
return ((gtOper == GT_ASG) && varTypeIsStruct(AsOp()->gtOp1)) || OperIsStoreBlk();
}
inline bool GenTree::OperIsInitBlkOp()
{
if (!OperIsBlkOp())
{
return false;
}
GenTree* src;
if (gtOper == GT_ASG)
{
src = gtGetOp2();
}
else
{
src = AsBlk()->Data()->gtSkipReloadOrCopy();
}
return src->OperIsInitVal() || src->OperIsConst();
}
inline bool GenTree::OperIsCopyBlkOp()
{
return OperIsBlkOp() && !OperIsInitBlkOp();
}
//------------------------------------------------------------------------
// IsFPZero: Checks whether this is a floating point constant with value 0.0
//
// Return Value:
// Returns true iff the tree is an GT_CNS_DBL, with value of 0.0.
inline bool GenTree::IsFPZero() const
{
if ((gtOper == GT_CNS_DBL) && (AsDblCon()->gtDconVal == 0.0))
{
return true;
}
return false;
}
//------------------------------------------------------------------------
// IsIntegralConst: Checks whether this is a constant node with the given value
//
// Arguments:
// constVal - the value of interest
//
// Return Value:
// Returns true iff the tree is an integral constant opcode, with
// the given value.
//
// Notes:
// Like gtIconVal, the argument is of type ssize_t, so this cannot check for
// long constants in a target-independent way.
inline bool GenTree::IsIntegralConst(ssize_t constVal) const
{
if ((gtOper == GT_CNS_INT) && (AsIntConCommon()->IconValue() == constVal))
{
return true;
}
if ((gtOper == GT_CNS_LNG) && (AsIntConCommon()->LngValue() == constVal))
{
return true;
}
return false;
}
//-------------------------------------------------------------------
// IsIntegralConstVector: returns true if this is a SIMD vector
// with all its elements equal to an integral constant.
//
// Arguments:
// constVal - const value of vector element
//
// Returns:
// True if this represents an integral const SIMD vector.
//
inline bool GenTree::IsIntegralConstVector(ssize_t constVal) const
{
#ifdef FEATURE_SIMD
// SIMDIntrinsicInit intrinsic with a const value as initializer
// represents a const vector.
if ((gtOper == GT_SIMD) && (AsSIMD()->GetSIMDIntrinsicId() == SIMDIntrinsicInit) &&
AsSIMD()->Op(1)->IsIntegralConst(constVal))
{
assert(varTypeIsIntegral(AsSIMD()->GetSimdBaseType()));
assert(AsSIMD()->GetOperandCount() == 1);
return true;
}
#endif // FEATURE_SIMD
#ifdef FEATURE_HW_INTRINSICS
if (gtOper == GT_HWINTRINSIC)
{
const GenTreeHWIntrinsic* node = AsHWIntrinsic();
if (!varTypeIsIntegral(node->GetSimdBaseType()))
{
// Can't be an integral constant
return false;
}
NamedIntrinsic intrinsicId = node->GetHWIntrinsicId();
if ((node->GetOperandCount() == 0) && (constVal == 0))
{
#if defined(TARGET_XARCH)
return (intrinsicId == NI_Vector128_get_Zero) || (intrinsicId == NI_Vector256_get_Zero);
#elif defined(TARGET_ARM64)
return (intrinsicId == NI_Vector64_get_Zero) || (intrinsicId == NI_Vector128_get_Zero);
#endif // !TARGET_XARCH && !TARGET_ARM64
}
else if ((node->GetOperandCount() == 1) && node->Op(1)->IsIntegralConst(constVal))
{
#if defined(TARGET_XARCH)
return (intrinsicId == NI_Vector128_Create) || (intrinsicId == NI_Vector256_Create);
#elif defined(TARGET_ARM64)
return (intrinsicId == NI_Vector64_Create) || (intrinsicId == NI_Vector128_Create);
#endif // !TARGET_XARCH && !TARGET_ARM64
}
}
#endif // FEATURE_HW_INTRINSICS
return false;
}
//-------------------------------------------------------------------
// IsSIMDZero: returns true if this is a SIMD vector
// with all its elements equal to zero.
//
// Returns:
// True if this represents a SIMD vector with all of its elements equal to zero.
//
inline bool GenTree::IsSIMDZero() const
{
#ifdef FEATURE_SIMD
if ((gtOper == GT_SIMD) && (AsSIMD()->GetSIMDIntrinsicId() == SIMDIntrinsicInit))
{
return (AsSIMD()->Op(1)->IsIntegralConst(0) || AsSIMD()->Op(1)->IsFPZero());
}
#endif
return false;
}
//-------------------------------------------------------------------
// IsFloatPositiveZero: returns true if this is exactly a const float value of positive zero (+0.0)
//
// Returns:
// True if this represents a const floating-point value of exactly positive zero (+0.0).
// Will return false if the value is negative zero (-0.0).
//
inline bool GenTree::IsFloatPositiveZero() const
{
if (IsCnsFltOrDbl())
{
// The bit pattern is compared directly rather than using !IsCnsNonZeroFltOrDbl, which
// reads less clearly here; only +0.0 has an all-zero bit pattern, so -0.0 returns false.
double constValue = AsDblCon()->gtDconVal;
return *(__int64*)&constValue == 0;
}
return false;
}
//-------------------------------------------------------------------
// IsVectorZero: returns true if this node is a HWIntrinsic that is Vector*_get_Zero.
//
// Returns:
// True if this represents a HWIntrinsic node that is Vector*_get_Zero.
//
// TODO: We already have IsSIMDZero() and IsIntegralConstVector(0);
// however, IsSIMDZero() does not cover hardware intrinsics, and IsIntegralConstVector(0) does not cover floating
// point. In order to not risk adverse behaviour by modifying those, this function 'IsVectorZero' was introduced.
// At some point, it makes sense to normalize this logic to be a single function call rather than have several
// separate ones; preferably this one.
inline bool GenTree::IsVectorZero() const
{
#ifdef FEATURE_HW_INTRINSICS
if (gtOper == GT_HWINTRINSIC)
{
const GenTreeHWIntrinsic* node = AsHWIntrinsic();
const NamedIntrinsic intrinsicId = node->GetHWIntrinsicId();
#if defined(TARGET_XARCH)
return (intrinsicId == NI_Vector128_get_Zero) || (intrinsicId == NI_Vector256_get_Zero);
#elif defined(TARGET_ARM64)
return (intrinsicId == NI_Vector64_get_Zero) || (intrinsicId == NI_Vector128_get_Zero);
#endif // !TARGET_XARCH && !TARGET_ARM64
}
#endif // FEATURE_HW_INTRINSICS
return false;
}
inline bool GenTree::IsBoxedValue()
{
assert(gtOper != GT_BOX || AsBox()->BoxOp() != nullptr);
return (gtOper == GT_BOX) && (gtFlags & GTF_BOX_VALUE);
}
#ifdef DEBUG
//------------------------------------------------------------------------
// IsValidCallArgument: Given a GenTree node that represents a call argument,
// enforce (or don't enforce) the following invariant.
//
// Arguments:
// instance method for a GenTree node
//
// Return values:
// true: the GenTree node is accepted as a valid argument
// false: the GenTree node is not accepted as a valid argument
//
// Notes:
// For targets that don't support arguments as a list of fields, we do not support GT_FIELD_LIST.
//
// Currently for AMD64 UNIX we allow a limited case where a GT_FIELD_LIST is
// allowed but every element must be a GT_LCL_FLD.
//
// For the future targets that allow for Multireg args (and this includes the current ARM64 target),
// or that allow for passing promoted structs, we allow a GT_FIELD_LIST of arbitrary nodes.
// These would typically start out as GT_LCL_VARs or GT_LCL_FLDS or GT_INDs,
// but could be changed into constants or GT_COMMA trees by the later
// optimization phases.
inline bool GenTree::IsValidCallArgument()
{
if (OperIs(GT_FIELD_LIST))
{
#if !FEATURE_MULTIREG_ARGS && !FEATURE_PUT_STRUCT_ARG_STK
return false;
#else // FEATURE_MULTIREG_ARGS or FEATURE_PUT_STRUCT_ARG_STK
// We allow this GT_FIELD_LIST as an argument
return true;
#endif // FEATURE_MULTIREG_ARGS or FEATURE_PUT_STRUCT_ARG_STK
}
// We don't have either kind of list, so it satisfies the invariant.
return true;
}
#endif // DEBUG
inline GenTree* GenTree::gtGetOp1() const
{
return AsOp()->gtOp1;
}
#ifdef DEBUG
/* static */ inline bool GenTree::RequiresNonNullOp2(genTreeOps oper)
{
switch (oper)
{
case GT_ADD:
case GT_SUB:
case GT_MUL:
case GT_DIV:
case GT_MOD:
case GT_UDIV:
case GT_UMOD:
case GT_OR:
case GT_XOR:
case GT_AND:
case GT_LSH:
case GT_RSH:
case GT_RSZ:
case GT_ROL:
case GT_ROR:
case GT_INDEX:
case GT_ASG:
case GT_EQ:
case GT_NE:
case GT_LT:
case GT_LE:
case GT_GE:
case GT_GT:
case GT_COMMA:
case GT_QMARK:
case GT_COLON:
case GT_MKREFANY:
return true;
default:
return false;
}
}
#endif // DEBUG
inline GenTree* GenTree::gtGetOp2() const
{
assert(OperIsBinary());
GenTree* op2 = AsOp()->gtOp2;
// Only allow null op2 if the node type allows it, e.g. GT_LEA.
assert((op2 != nullptr) || !RequiresNonNullOp2(gtOper));
return op2;
}
inline GenTree* GenTree::gtGetOp2IfPresent() const
{
/* AsOp()->gtOp2 is only valid for GTK_BINOP nodes. */
GenTree* op2 = OperIsBinary() ? AsOp()->gtOp2 : nullptr;
// This documents the genTreeOps for which AsOp()->gtOp2 cannot be nullptr.
// This helps prefix in its analysis of code which calls gtGetOp2()
assert((op2 != nullptr) || !RequiresNonNullOp2(gtOper));
return op2;
}
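//-------------------------------------------------------------------------
// gtEffectiveVal: walk past COMMA nodes (and, unless commaOnly is set, NOP nodes that
//    wrap a value) and return the "effective" value of the tree.
//
// For example, for COMMA(ASG(tmp, x), tmp) this returns the trailing use of "tmp".
//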
inline GenTree* GenTree::gtEffectiveVal(bool commaOnly /* = false */)
{
GenTree* effectiveVal = this;
for (;;)
{
assert(!effectiveVal->OperIs(GT_PUTARG_TYPE));
if (effectiveVal->gtOper == GT_COMMA)
{
effectiveVal = effectiveVal->AsOp()->gtGetOp2();
}
else if (!commaOnly && (effectiveVal->gtOper == GT_NOP) && (effectiveVal->AsOp()->gtOp1 != nullptr))
{
effectiveVal = effectiveVal->AsOp()->gtOp1;
}
else
{
return effectiveVal;
}
}
}
//-------------------------------------------------------------------------
// gtCommaAssignVal - find the value being assigned in a comma-wrapped assignment
//
// Returns:
//    the tree representing the value being assigned, if this tree represents a
//    comma-wrapped local definition and use;
//
//    the original tree, if not.
//
inline GenTree* GenTree::gtCommaAssignVal()
{
GenTree* result = this;
if (OperIs(GT_COMMA))
{
GenTree* commaOp1 = AsOp()->gtOp1;
GenTree* commaOp2 = AsOp()->gtOp2;
if (commaOp2->OperIs(GT_LCL_VAR) && commaOp1->OperIs(GT_ASG))
{
GenTree* asgOp1 = commaOp1->AsOp()->gtOp1;
GenTree* asgOp2 = commaOp1->AsOp()->gtOp2;
if (asgOp1->OperIs(GT_LCL_VAR) && (asgOp1->AsLclVar()->GetLclNum() == commaOp2->AsLclVar()->GetLclNum()))
{
result = asgOp2;
}
}
}
return result;
}
//-------------------------------------------------------------------------
// gtSkipPutArgType - skip PUTARG_TYPE if it is present.
//
// Returns:
// the original tree or its child if it was a PUTARG_TYPE.
//
// Notes:
// PUTARG_TYPE should be skipped when we are doing transformations
// that are not affected by ABI, for example: inlining, implicit byref morphing.
//
inline GenTree* GenTree::gtSkipPutArgType()
{
if (OperIs(GT_PUTARG_TYPE))
{
GenTree* res = AsUnOp()->gtGetOp1();
assert(!res->OperIs(GT_PUTARG_TYPE));
return res;
}
return this;
}
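// gtSkipReloadOrCopy: if this node is a GT_RELOAD or GT_COPY wrapper, return its operand; otherwise return the node itself.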
inline GenTree* GenTree::gtSkipReloadOrCopy()
{
// There can be only one reload or copy (we can't have a reload/copy of a reload/copy)
if (gtOper == GT_RELOAD || gtOper == GT_COPY)
{
assert(gtGetOp1()->OperGet() != GT_RELOAD && gtGetOp1()->OperGet() != GT_COPY);
return gtGetOp1();
}
return this;
}
//-----------------------------------------------------------------------------------
// IsMultiRegCall: whether a call node returns its value in more than one register
//
// Arguments:
// None
//
// Return Value:
// Returns true if this GenTree is a multi register returning call
//
inline bool GenTree::IsMultiRegCall() const
{
if (this->IsCall())
{
return AsCall()->HasMultiRegRetVal();
}
return false;
}
//-----------------------------------------------------------------------------------
// IsMultiRegLclVar: whether a local var node defines multiple registers
//
// Arguments:
// None
//
// Return Value:
// Returns true if this GenTree is a multi register defining local var
//
inline bool GenTree::IsMultiRegLclVar() const
{
if (OperIsScalarLocal())
{
return AsLclVar()->IsMultiReg();
}
return false;
}
//-----------------------------------------------------------------------------------
// GetRegByIndex: Get a specific register, based on regIndex, that is produced by this node.
//
// Arguments:
// regIndex - which register to return (must be 0 for non-multireg nodes)
//
// Return Value:
// The register, if any, assigned to this index for this node.
//
// Notes:
// All targets that support multi-reg ops of any kind also support multi-reg return
// values for calls. Should that change with a future target, this method will need
// to change accordingly.
//
inline regNumber GenTree::GetRegByIndex(int regIndex) const
{
if (regIndex == 0)
{
return GetRegNum();
}
#if FEATURE_MULTIREG_RET
if (IsMultiRegCall())
{
return AsCall()->GetRegNumByIdx(regIndex);
}
#if FEATURE_ARG_SPLIT
if (OperIsPutArgSplit())
{
return AsPutArgSplit()->GetRegNumByIdx(regIndex);
}
#endif
#if !defined(TARGET_64BIT)
if (OperIsMultiRegOp())
{
return AsMultiRegOp()->GetRegNumByIdx(regIndex);
}
#endif
if (OperIs(GT_COPY, GT_RELOAD))
{
return AsCopyOrReload()->GetRegNumByIdx(regIndex);
}
#endif // FEATURE_MULTIREG_RET
#ifdef FEATURE_HW_INTRINSICS
if (OperIs(GT_HWINTRINSIC))
{
assert(regIndex == 1);
// TODO-ARM64-NYI: Support hardware intrinsics operating on multiple contiguous registers.
return AsHWIntrinsic()->GetOtherReg();
}
#endif // FEATURE_HW_INTRINSICS
if (OperIsScalarLocal())
{
return AsLclVar()->GetRegNumByIdx(regIndex);
}
assert(!"Invalid regIndex for GetRegByIndex");
return REG_NA;
}
//-----------------------------------------------------------------------------------
// GetRegTypeByIndex: Get a specific register's type, based on regIndex, that is produced
// by this multi-reg node.
//
// Arguments:
// regIndex - index of register whose type will be returned
//
// Return Value:
// The register type assigned to this index for this node.
//
// Notes:
// This must be a multireg node that is *not* a copy or reload (which must retrieve the
// type from its source), and 'regIndex' must be a valid index for this node.
//
// All targets that support multi-reg ops of any kind also support multi-reg return
// values for calls. Should that change with a future target, this method will need
// to change accordingly.
//
inline var_types GenTree::GetRegTypeByIndex(int regIndex) const
{
#if FEATURE_MULTIREG_RET
if (IsMultiRegCall())
{
return AsCall()->GetReturnTypeDesc()->GetReturnRegType(regIndex);
}
#if FEATURE_ARG_SPLIT
if (OperIsPutArgSplit())
{
return AsPutArgSplit()->GetRegType(regIndex);
}
#endif // FEATURE_ARG_SPLIT
#if !defined(TARGET_64BIT)
if (OperIsMultiRegOp())
{
return AsMultiRegOp()->GetRegType(regIndex);
}
#endif // !defined(TARGET_64BIT)
#endif // FEATURE_MULTIREG_RET
if (OperIsHWIntrinsic())
{
assert(TypeGet() == TYP_STRUCT);
#ifdef TARGET_ARM64
if (AsHWIntrinsic()->GetSimdSize() == 16)
{
return TYP_SIMD16;
}
else
{
assert(AsHWIntrinsic()->GetSimdSize() == 8);
return TYP_SIMD8;
}
#elif defined(TARGET_XARCH)
// At this time, the only multi-reg HW intrinsics all return the type of their
// arguments. If this changes, we will need a way to record or determine this.
return gtGetOp1()->TypeGet();
#endif
}
if (OperIsScalarLocal())
{
if (TypeGet() == TYP_LONG)
{
return TYP_INT;
}
assert(TypeGet() == TYP_STRUCT);
assert((gtFlags & GTF_VAR_MULTIREG) != 0);
// The register type for a multireg lclVar requires looking at the LclVarDsc,
// which requires a Compiler instance. The caller must use the GetFieldTypeByIndex
// on GenTreeLclVar.
assert(!"GetRegTypeByIndex for LclVar");
}
assert(!"Invalid node type for GetRegTypeByIndex");
return TYP_UNDEF;
}
//-----------------------------------------------------------------------------------
// GetRegSpillFlagByIdx: Get a specific register's spill flags, based on regIndex,
// for this multi-reg node.
//
// Arguments:
// regIndex - which register's spill flags to return
//
// Return Value:
// The spill flags (GTF_SPILL or GTF_SPILLED) for this register.
//
// Notes:
// This must be a multireg node and 'regIndex' must be a valid index for this node.
// This method returns the GTF "equivalent" flags based on the packed flags on the multireg node.
//
inline GenTreeFlags GenTree::GetRegSpillFlagByIdx(int regIndex) const
{
#if FEATURE_MULTIREG_RET
if (IsMultiRegCall())
{
return AsCall()->GetRegSpillFlagByIdx(regIndex);
}
#if FEATURE_ARG_SPLIT
if (OperIsPutArgSplit())
{
return AsPutArgSplit()->GetRegSpillFlagByIdx(regIndex);
}
#endif // FEATURE_ARG_SPLIT
#if !defined(TARGET_64BIT)
if (OperIsMultiRegOp())
{
return AsMultiRegOp()->GetRegSpillFlagByIdx(regIndex);
}
#endif // !defined(TARGET_64BIT)
#endif // FEATURE_MULTIREG_RET
if (OperIsScalarLocal())
{
return AsLclVar()->GetRegSpillFlagByIdx(regIndex);
}
assert(!"Invalid node type for GetRegSpillFlagByIdx");
return GTF_EMPTY;
}
//-----------------------------------------------------------------------------------
// GetLastUseBit: Get the last use bit for regIndex
//
// Arguments:
// regIndex - the register index
//
// Return Value:
// The bit to set, clear or query for the last-use of the regIndex'th value.
//
// Notes:
// This must be a GenTreeLclVar or GenTreeCopyOrReload node.
//
inline GenTreeFlags GenTree::GetLastUseBit(int regIndex) const
{
assert(regIndex < 4);
assert(OperIs(GT_LCL_VAR, GT_STORE_LCL_VAR, GT_COPY, GT_RELOAD));
static_assert_no_msg((1 << MULTIREG_LAST_USE_SHIFT) == GTF_VAR_MULTIREG_DEATH0);
return (GenTreeFlags)(1 << (MULTIREG_LAST_USE_SHIFT + regIndex));
}
//-----------------------------------------------------------------------------------
// IsLastUse: Determine whether this node is a last use of the regIndex'th value
//
// Arguments:
// regIndex - the register index
//
// Return Value:
// true iff this is a last use.
//
// Notes:
// This must be a GenTreeLclVar or GenTreeCopyOrReload node.
//
inline bool GenTree::IsLastUse(int regIndex) const
{
assert(OperIs(GT_LCL_VAR, GT_STORE_LCL_VAR, GT_COPY, GT_RELOAD));
return (gtFlags & GetLastUseBit(regIndex)) != 0;
}
//-----------------------------------------------------------------------------------
// HasLastUse: Determine whether this node is a last use of any value
//
// Return Value:
// true iff this has any last uses (i.e. at any index).
//
// Notes:
// This must be a GenTreeLclVar or GenTreeCopyOrReload node.
//
inline bool GenTree::HasLastUse() const
{
return (gtFlags & (GTF_VAR_DEATH_MASK)) != 0;
}
//-----------------------------------------------------------------------------------
// SetLastUse: Set the last use bit for the given index
//
// Arguments:
// regIndex - the register index
//
// Notes:
// This must be a GenTreeLclVar or GenTreeCopyOrReload node.
//
inline void GenTree::SetLastUse(int regIndex)
{
gtFlags |= GetLastUseBit(regIndex);
}
//-----------------------------------------------------------------------------------
// ClearLastUse: Clear the last use bit for the given index
//
// Arguments:
// regIndex - the register index
//
// Notes:
// This must be a GenTreeLclVar or GenTreeCopyOrReload node.
//
inline void GenTree::ClearLastUse(int regIndex)
{
gtFlags &= ~GetLastUseBit(regIndex);
}
//-------------------------------------------------------------------------
// IsCopyOrReload: whether this is a GT_COPY or GT_RELOAD node.
//
// Arguments:
// None
//
// Return Value:
// Returns true if this GenTree is a copy or reload node.
//
inline bool GenTree::IsCopyOrReload() const
{
return (gtOper == GT_COPY || gtOper == GT_RELOAD);
}
//-----------------------------------------------------------------------------------
// IsCopyOrReloadOfMultiRegCall: whether this is a GT_COPY or GT_RELOAD of a multi-reg
// call node.
//
// Arguments:
// None
//
// Return Value:
// Returns true if this GenTree is a copy or reload of multi-reg call node.
//
inline bool GenTree::IsCopyOrReloadOfMultiRegCall() const
{
if (IsCopyOrReload())
{
return gtGetOp1()->IsMultiRegCall();
}
return false;
}
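// IsCnsIntOrI: true iff this node is a GT_CNS_INT constant (an int- or native-int-sized integer constant).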
inline bool GenTree::IsCnsIntOrI() const
{
return (gtOper == GT_CNS_INT);
}
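// IsIntegralConst: true iff this node is an integral constant of any size; on 32-bit targets this also includes GT_CNS_LNG nodes.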
inline bool GenTree::IsIntegralConst() const
{
#ifdef TARGET_64BIT
return IsCnsIntOrI();
#else // !TARGET_64BIT
return ((gtOper == GT_CNS_INT) || (gtOper == GT_CNS_LNG));
#endif // !TARGET_64BIT
}
// Is this node an integer constant that fits in a 32-bit signed integer (INT32)
inline bool GenTree::IsIntCnsFitsInI32()
{
#ifdef TARGET_64BIT
return IsCnsIntOrI() && AsIntCon()->FitsInI32();
#else // !TARGET_64BIT
return IsCnsIntOrI();
#endif // !TARGET_64BIT
}
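// IsCnsFltOrDbl: true iff this node is a floating-point constant (GT_CNS_DBL).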
inline bool GenTree::IsCnsFltOrDbl() const
{
return OperGet() == GT_CNS_DBL;
}
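// IsCnsNonZeroFltOrDbl: true iff this node is a floating-point constant whose bit pattern is not all zeros
// (so +0.0 returns false, while -0.0 returns true).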
inline bool GenTree::IsCnsNonZeroFltOrDbl() const
{
if (OperGet() == GT_CNS_DBL)
{
double constValue = AsDblCon()->gtDconVal;
return *(__int64*)&constValue != 0;
}
return false;
}
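// IsHelperCall: true iff this node is a call to a JIT helper (a GT_CALL with call type CT_HELPER).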
inline bool GenTree::IsHelperCall()
{
return OperGet() == GT_CALL && AsCall()->gtCallType == CT_HELPER;
}
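// CastFromType / CastToType: for a GT_CAST node, the type of the value being cast and a (mutable)
// reference to the target type, respectively.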
inline var_types GenTree::CastFromType()
{
return this->AsCast()->CastOp()->TypeGet();
}
inline var_types& GenTree::CastToType()
{
return this->AsCast()->gtCastType;
}
inline bool GenTree::isUsedFromSpillTemp() const
{
// If spilled and no reg at use, then it is used from the spill temp location rather than being reloaded.
if (((gtFlags & GTF_SPILLED) != 0) && ((gtFlags & GTF_NOREG_AT_USE) != 0))
{
return true;
}
return false;
}
/*****************************************************************************/
#ifndef HOST_64BIT
#include <poppack.h>
#endif
/*****************************************************************************/
const size_t TREE_NODE_SZ_SMALL = sizeof(GenTreeLclFld);
const size_t TREE_NODE_SZ_LARGE = sizeof(GenTreeCall);
enum varRefKinds
{
VR_INVARIANT = 0x00, // an invariant value
VR_NONE = 0x00,
VR_IND_REF = 0x01, // an object reference
VR_IND_SCL = 0x02, // a non-object reference
VR_GLB_VAR = 0x04, // a global (clsVar)
};
/*****************************************************************************/
#endif // !GENTREE_H
/*****************************************************************************/
| 1 |
dotnet/runtime | 66,245 | JIT: Optimize movzx after setcc | Clear target reg via `xor reg, reg` instead of relying on zero-extend after SETCC which is heavier and slower
```csharp
bool Test(int x) => x == 42;
```
```diff
; Method Test(int):bool:this
G_M55728_IG01: ;; offset=0000H
G_M55728_IG02: ;; offset=0000H
+ 33C0 xor eax, eax
83FA2A cmp edx, 42
0F94C0 sete al
- 0FB6C0 movzx rax, al
G_M55728_IG03: ;; offset=0009H
C3 ret
-; Total bytes of code: 10
+; Total bytes of code: 9
```
Nice diffs:
```
benchmarks.run.Linux.x64.checked.mch:
Total bytes of delta: -5230 (-0.03 % of base)
coreclr_tests.pmi.Linux.x64.checked.mch:
Total bytes of delta: -210860 (-0.16 % of base)
libraries.crossgen2.Linux.x64.checked.mch:
Total bytes of delta: -4420 (-0.06 % of base)
libraries.pmi.Linux.x64.checked.mch:
Total bytes of delta: -25312 (-0.05 % of base)
libraries_tests.pmi.Linux.x64.checked.mch:
Total bytes of delta: -49314 (-0.04 % of base)
``` | EgorBo | 2022-03-05T17:18:25Z | 2022-03-07T23:22:14Z | 440dfe4a7beecd7755767aa247f47af00b119383 | 5635905f134a3329a15112bd4975acef3f661eb2 | JIT: Optimize movzx after setcc. Clear target reg via `xor reg, reg` instead of relying on zero-extend after SETCC which is heavier and slower
```csharp
bool Test(int x) => x == 42;
```
```diff
; Method Test(int):bool:this
G_M55728_IG01: ;; offset=0000H
G_M55728_IG02: ;; offset=0000H
+ 33C0 xor eax, eax
83FA2A cmp edx, 42
0F94C0 sete al
- 0FB6C0 movzx rax, al
G_M55728_IG03: ;; offset=0009H
C3 ret
-; Total bytes of code: 10
+; Total bytes of code: 9
```
Nice diffs:
```
benchmarks.run.Linux.x64.checked.mch:
Total bytes of delta: -5230 (-0.03 % of base)
coreclr_tests.pmi.Linux.x64.checked.mch:
Total bytes of delta: -210860 (-0.16 % of base)
libraries.crossgen2.Linux.x64.checked.mch:
Total bytes of delta: -4420 (-0.06 % of base)
libraries.pmi.Linux.x64.checked.mch:
Total bytes of delta: -25312 (-0.05 % of base)
libraries_tests.pmi.Linux.x64.checked.mch:
Total bytes of delta: -49314 (-0.04 % of base)
``` | ./src/coreclr/pal/src/safecrt/mbusafecrt_internal.h | // Licensed to the .NET Foundation under one or more agreements.
// The .NET Foundation licenses this file to you under the MIT license.
/***
* mbusafecrt_internal.h - internal declarations for SafeCRT functions
*
*
* Purpose:
* This file contains the internal declarations SafeCRT
* functions ported to MacOS. These are the safe versions of
* functions standard functions banned by SWI
****/
/* shields! */
#ifndef MBUSAFECRT_INTERNAL_H
#define MBUSAFECRT_INTERNAL_H
#define PAL_IMPLEMENTATION
#include "pal_mstypes.h"
#ifndef DLLEXPORT
#ifdef _MSC_VER
#define DLLEXPORT __declspec(dllexport)
#else
#define DLLEXPORT __attribute__ ((visibility ("default")))
#endif // _MSC_VER
#endif // !DLLEXPORT
typedef __builtin_va_list va_list;
// The ifdef below are to accommodate Unix build
// that complains about them being declared in stdarg.h already.
#ifndef va_start
#define va_start __builtin_va_start
#endif
#ifndef va_end
#define va_end __builtin_va_end
#endif
#include "mbusafecrt.h"
#ifdef EOF
#undef EOF
#endif
#define EOF -1
#ifdef WEOF
#undef WEOF
#endif
#define WEOF -1
#define CASSERT(p) extern int sanity_check_dummy[1+((!(p))*(-2))];
extern tSafeCRT_AssertFuncPtr sMBUSafeCRTAssertFunc;
typedef struct miniFILE_struct
{
char* _ptr;
int _cnt;
char* _base;
int _flag;
} miniFILE;
#undef _IOWRT
#undef _IOREAD
#undef _IOMYBUF
#define _IOSTRG 1
#define _IOWRT 2
#define _IOREAD 4
#define _IOMYBUF 8
int _putc_nolock( char inChar, miniFILE* inStream );
int _putwc_nolock( char16_t inChar, miniFILE* inStream );
int _getc_nolock( miniFILE* inStream );
int _getwc_nolock( miniFILE* inStream );
int _ungetc_nolock( char inChar, miniFILE* inStream );
int _ungetwc_nolock( char16_t inChar, miniFILE* inStream );
errno_t _safecrt_cfltcvt(double *arg, char *buffer, size_t sizeInBytes, int type, int precision, int flags);
void _safecrt_fassign(int flag, void* argument, char * number );
void _safecrt_wfassign(int flag, void* argument, char16_t * number );
int _minimal_chartowchar( char16_t* outWChar, const char* inChar );
int _output_s( miniFILE* outfile, const char* _Format, va_list _ArgList);
int _woutput_s( miniFILE* outfile, const char16_t* _Format, va_list _ArgList);
int _output( miniFILE *outfile, const char* _Format, va_list _ArgList);
int __tinput_s( miniFILE* inFile, const unsigned char * inFormat, va_list inArgList );
int __twinput_s( miniFILE* inFile, const char16_t * inFormat, va_list inArgList );
#endif /* MBUSAFECRT_INTERNAL_H */
| // Licensed to the .NET Foundation under one or more agreements.
// The .NET Foundation licenses this file to you under the MIT license.
/***
* mbusafecrt_internal.h - internal declarations for SafeCRT functions
*
*
* Purpose:
* This file contains the internal declarations SafeCRT
* functions ported to MacOS. These are the safe versions of
* functions standard functions banned by SWI
****/
/* shields! */
#ifndef MBUSAFECRT_INTERNAL_H
#define MBUSAFECRT_INTERNAL_H
#define PAL_IMPLEMENTATION
#include "pal_mstypes.h"
#ifndef DLLEXPORT
#ifdef _MSC_VER
#define DLLEXPORT __declspec(dllexport)
#else
#define DLLEXPORT __attribute__ ((visibility ("default")))
#endif // _MSC_VER
#endif // !DLLEXPORT
typedef __builtin_va_list va_list;
// The ifdef below are to accommodate Unix build
// that complains about them being declared in stdarg.h already.
#ifndef va_start
#define va_start __builtin_va_start
#endif
#ifndef va_end
#define va_end __builtin_va_end
#endif
#include "mbusafecrt.h"
#ifdef EOF
#undef EOF
#endif
#define EOF -1
#ifdef WEOF
#undef WEOF
#endif
#define WEOF -1
#define CASSERT(p) extern int sanity_check_dummy[1+((!(p))*(-2))];
extern tSafeCRT_AssertFuncPtr sMBUSafeCRTAssertFunc;
typedef struct miniFILE_struct
{
char* _ptr;
int _cnt;
char* _base;
int _flag;
} miniFILE;
#undef _IOWRT
#undef _IOREAD
#undef _IOMYBUF
#define _IOSTRG 1
#define _IOWRT 2
#define _IOREAD 4
#define _IOMYBUF 8
int _putc_nolock( char inChar, miniFILE* inStream );
int _putwc_nolock( char16_t inChar, miniFILE* inStream );
int _getc_nolock( miniFILE* inStream );
int _getwc_nolock( miniFILE* inStream );
int _ungetc_nolock( char inChar, miniFILE* inStream );
int _ungetwc_nolock( char16_t inChar, miniFILE* inStream );
errno_t _safecrt_cfltcvt(double *arg, char *buffer, size_t sizeInBytes, int type, int precision, int flags);
void _safecrt_fassign(int flag, void* argument, char * number );
void _safecrt_wfassign(int flag, void* argument, char16_t * number );
int _minimal_chartowchar( char16_t* outWChar, const char* inChar );
int _output_s( miniFILE* outfile, const char* _Format, va_list _ArgList);
int _woutput_s( miniFILE* outfile, const char16_t* _Format, va_list _ArgList);
int _output( miniFILE *outfile, const char* _Format, va_list _ArgList);
int __tinput_s( miniFILE* inFile, const unsigned char * inFormat, va_list inArgList );
int __twinput_s( miniFILE* inFile, const char16_t * inFormat, va_list inArgList );
#endif /* MBUSAFECRT_INTERNAL_H */
| -1 |
dotnet/runtime | 66,245 | JIT: Optimize movzx after setcc | Clear target reg via `xor reg, reg` instead of relying on zero-extend after SETCC which is heavier and slower
```csharp
bool Test(int x) => x == 42;
```
```diff
; Method Test(int):bool:this
G_M55728_IG01: ;; offset=0000H
G_M55728_IG02: ;; offset=0000H
+ 33C0 xor eax, eax
83FA2A cmp edx, 42
0F94C0 sete al
- 0FB6C0 movzx rax, al
G_M55728_IG03: ;; offset=0009H
C3 ret
-; Total bytes of code: 10
+; Total bytes of code: 9
```
Nice diffs:
```
benchmarks.run.Linux.x64.checked.mch:
Total bytes of delta: -5230 (-0.03 % of base)
coreclr_tests.pmi.Linux.x64.checked.mch:
Total bytes of delta: -210860 (-0.16 % of base)
libraries.crossgen2.Linux.x64.checked.mch:
Total bytes of delta: -4420 (-0.06 % of base)
libraries.pmi.Linux.x64.checked.mch:
Total bytes of delta: -25312 (-0.05 % of base)
libraries_tests.pmi.Linux.x64.checked.mch:
Total bytes of delta: -49314 (-0.04 % of base)
``` | EgorBo | 2022-03-05T17:18:25Z | 2022-03-07T23:22:14Z | 440dfe4a7beecd7755767aa247f47af00b119383 | 5635905f134a3329a15112bd4975acef3f661eb2 | JIT: Optimize movzx after setcc. Clear target reg via `xor reg, reg` instead of relying on zero-extend after SETCC which is heavier and slower
```csharp
bool Test(int x) => x == 42;
```
```diff
; Method Test(int):bool:this
G_M55728_IG01: ;; offset=0000H
G_M55728_IG02: ;; offset=0000H
+ 33C0 xor eax, eax
83FA2A cmp edx, 42
0F94C0 sete al
- 0FB6C0 movzx rax, al
G_M55728_IG03: ;; offset=0009H
C3 ret
-; Total bytes of code: 10
+; Total bytes of code: 9
```
Nice diffs:
```
benchmarks.run.Linux.x64.checked.mch:
Total bytes of delta: -5230 (-0.03 % of base)
coreclr_tests.pmi.Linux.x64.checked.mch:
Total bytes of delta: -210860 (-0.16 % of base)
libraries.crossgen2.Linux.x64.checked.mch:
Total bytes of delta: -4420 (-0.06 % of base)
libraries.pmi.Linux.x64.checked.mch:
Total bytes of delta: -25312 (-0.05 % of base)
libraries_tests.pmi.Linux.x64.checked.mch:
Total bytes of delta: -49314 (-0.04 % of base)
``` | ./src/coreclr/pal/tests/palsuite/samples/test1/test.cpp | // Licensed to the .NET Foundation under one or more agreements.
// The .NET Foundation licenses this file to you under the MIT license.
/*============================================================================
**
** Source: test.c
**
** Purpose: This test is an example of the basic structure of a PAL test
** suite test case.
**
**
**==========================================================================*/
#include <palsuite.h>
PALTEST(samples_test1_paltest_samples_test1, "samples/test1/paltest_samples_test1")
{
/* Initialize the PAL.
*/
if(0 != PAL_Initialize(argc, argv))
{
return FAIL;
}
Trace("\nTest #1...\n");
#ifdef WIN32
Trace("\nWe are testing under Win32 environment.\n");
#else
Trace("\nWe are testing under Non-Win32 environment.\n");
#endif
Trace("\nThis test has passed.\n");
/* Shutdown the PAL.
*/
PAL_Terminate();
return PASS;
}
| // Licensed to the .NET Foundation under one or more agreements.
// The .NET Foundation licenses this file to you under the MIT license.
/*============================================================================
**
** Source: test.c
**
** Purpose: This test is an example of the basic structure of a PAL test
** suite test case.
**
**
**==========================================================================*/
#include <palsuite.h>
PALTEST(samples_test1_paltest_samples_test1, "samples/test1/paltest_samples_test1")
{
/* Initialize the PAL.
*/
if(0 != PAL_Initialize(argc, argv))
{
return FAIL;
}
Trace("\nTest #1...\n");
#ifdef WIN32
Trace("\nWe are testing under Win32 environment.\n");
#else
Trace("\nWe are testing under Non-Win32 environment.\n");
#endif
Trace("\nThis test has passed.\n");
/* Shutdown the PAL.
*/
PAL_Terminate();
return PASS;
}
| -1 |
dotnet/runtime | 66,245 | JIT: Optimize movzx after setcc | Clear target reg via `xor reg, reg` instead of relying on zero-extend after SETCC which is heavier and slower
```csharp
bool Test(int x) => x == 42;
```
```diff
; Method Test(int):bool:this
G_M55728_IG01: ;; offset=0000H
G_M55728_IG02: ;; offset=0000H
+ 33C0 xor eax, eax
83FA2A cmp edx, 42
0F94C0 sete al
- 0FB6C0 movzx rax, al
G_M55728_IG03: ;; offset=0009H
C3 ret
-; Total bytes of code: 10
+; Total bytes of code: 9
```
Nice diffs:
```
benchmarks.run.Linux.x64.checked.mch:
Total bytes of delta: -5230 (-0.03 % of base)
coreclr_tests.pmi.Linux.x64.checked.mch:
Total bytes of delta: -210860 (-0.16 % of base)
libraries.crossgen2.Linux.x64.checked.mch:
Total bytes of delta: -4420 (-0.06 % of base)
libraries.pmi.Linux.x64.checked.mch:
Total bytes of delta: -25312 (-0.05 % of base)
libraries_tests.pmi.Linux.x64.checked.mch:
Total bytes of delta: -49314 (-0.04 % of base)
``` | EgorBo | 2022-03-05T17:18:25Z | 2022-03-07T23:22:14Z | 440dfe4a7beecd7755767aa247f47af00b119383 | 5635905f134a3329a15112bd4975acef3f661eb2 | JIT: Optimize movzx after setcc. Clear target reg via `xor reg, reg` instead of relying on zero-extend after SETCC which is heavier and slower
```csharp
bool Test(int x) => x == 42;
```
```diff
; Method Test(int):bool:this
G_M55728_IG01: ;; offset=0000H
G_M55728_IG02: ;; offset=0000H
+ 33C0 xor eax, eax
83FA2A cmp edx, 42
0F94C0 sete al
- 0FB6C0 movzx rax, al
G_M55728_IG03: ;; offset=0009H
C3 ret
-; Total bytes of code: 10
+; Total bytes of code: 9
```
Nice diffs:
```
benchmarks.run.Linux.x64.checked.mch:
Total bytes of delta: -5230 (-0.03 % of base)
coreclr_tests.pmi.Linux.x64.checked.mch:
Total bytes of delta: -210860 (-0.16 % of base)
libraries.crossgen2.Linux.x64.checked.mch:
Total bytes of delta: -4420 (-0.06 % of base)
libraries.pmi.Linux.x64.checked.mch:
Total bytes of delta: -25312 (-0.05 % of base)
libraries_tests.pmi.Linux.x64.checked.mch:
Total bytes of delta: -49314 (-0.04 % of base)
``` | ./src/coreclr/pal/src/libunwind/src/s390x/init.h | /* libunwind - a platform-independent unwind library
Copyright (C) 2002 Hewlett-Packard Co
Contributed by David Mosberger-Tang <[email protected]>
Modified for x86_64 by Max Asbock <[email protected]>
Modified for s390x by Michael Munday <[email protected]>
This file is part of libunwind.
Permission is hereby granted, free of charge, to any person obtaining
a copy of this software and associated documentation files (the
"Software"), to deal in the Software without restriction, including
without limitation the rights to use, copy, modify, merge, publish,
distribute, sublicense, and/or sell copies of the Software, and to
permit persons to whom the Software is furnished to do so, subject to
the following conditions:
The above copyright notice and this permission notice shall be
included in all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. */
#include "unwind_i.h"
static inline int
common_init (struct cursor *c, unsigned use_prev_instr)
{
int ret;
int i;
for (i = UNW_S390X_R0; i <= UNW_S390X_R15; ++i) {
c->dwarf.loc[i] = DWARF_REG_LOC(&c->dwarf, i);
}
for (i = UNW_S390X_F0; i <= UNW_S390X_F15; ++i) {
c->dwarf.loc[i] = DWARF_FPREG_LOC(&c->dwarf, i);
}
/* IP isn't a real register, it is encoded in the PSW */
c->dwarf.loc[UNW_S390X_IP] = DWARF_REG_LOC(&c->dwarf, UNW_S390X_IP);
ret = dwarf_get (&c->dwarf, c->dwarf.loc[UNW_S390X_IP], &c->dwarf.ip);
if (ret < 0)
return ret;
/* Normally the CFA offset on s390x is biased, however this is taken
into account by the CFA offset in dwarf_step, so here we just mark
make it equal to the stack pointer. */
ret = dwarf_get (&c->dwarf, DWARF_REG_LOC (&c->dwarf, UNW_S390X_R15),
&c->dwarf.cfa);
if (ret < 0)
return ret;
c->sigcontext_format = S390X_SCF_NONE;
c->sigcontext_addr = 0;
c->dwarf.args_size = 0;
c->dwarf.stash_frames = 0;
c->dwarf.use_prev_instr = use_prev_instr;
c->dwarf.pi_valid = 0;
c->dwarf.pi_is_dynamic = 0;
c->dwarf.hint = 0;
c->dwarf.prev_rs = 0;
c->dwarf.eh_valid_mask = 0;
return 0;
}
| /* libunwind - a platform-independent unwind library
Copyright (C) 2002 Hewlett-Packard Co
Contributed by David Mosberger-Tang <[email protected]>
Modified for x86_64 by Max Asbock <[email protected]>
Modified for s390x by Michael Munday <[email protected]>
This file is part of libunwind.
Permission is hereby granted, free of charge, to any person obtaining
a copy of this software and associated documentation files (the
"Software"), to deal in the Software without restriction, including
without limitation the rights to use, copy, modify, merge, publish,
distribute, sublicense, and/or sell copies of the Software, and to
permit persons to whom the Software is furnished to do so, subject to
the following conditions:
The above copyright notice and this permission notice shall be
included in all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. */
#include "unwind_i.h"
static inline int
common_init (struct cursor *c, unsigned use_prev_instr)
{
int ret;
int i;
for (i = UNW_S390X_R0; i <= UNW_S390X_R15; ++i) {
c->dwarf.loc[i] = DWARF_REG_LOC(&c->dwarf, i);
}
for (i = UNW_S390X_F0; i <= UNW_S390X_F15; ++i) {
c->dwarf.loc[i] = DWARF_FPREG_LOC(&c->dwarf, i);
}
/* IP isn't a real register, it is encoded in the PSW */
c->dwarf.loc[UNW_S390X_IP] = DWARF_REG_LOC(&c->dwarf, UNW_S390X_IP);
ret = dwarf_get (&c->dwarf, c->dwarf.loc[UNW_S390X_IP], &c->dwarf.ip);
if (ret < 0)
return ret;
/* Normally the CFA offset on s390x is biased, however this is taken
into account by the CFA offset in dwarf_step, so here we just mark
make it equal to the stack pointer. */
ret = dwarf_get (&c->dwarf, DWARF_REG_LOC (&c->dwarf, UNW_S390X_R15),
&c->dwarf.cfa);
if (ret < 0)
return ret;
c->sigcontext_format = S390X_SCF_NONE;
c->sigcontext_addr = 0;
c->dwarf.args_size = 0;
c->dwarf.stash_frames = 0;
c->dwarf.use_prev_instr = use_prev_instr;
c->dwarf.pi_valid = 0;
c->dwarf.pi_is_dynamic = 0;
c->dwarf.hint = 0;
c->dwarf.prev_rs = 0;
c->dwarf.eh_valid_mask = 0;
return 0;
}
| -1 |
dotnet/runtime | 66,245 | JIT: Optimize movzx after setcc | Clear target reg via `xor reg, reg` instead of relying on zero-extend after SETCC which is heavier and slower
```csharp
bool Test(int x) => x == 42;
```
```diff
; Method Test(int):bool:this
G_M55728_IG01: ;; offset=0000H
G_M55728_IG02: ;; offset=0000H
+ 33C0 xor eax, eax
83FA2A cmp edx, 42
0F94C0 sete al
- 0FB6C0 movzx rax, al
G_M55728_IG03: ;; offset=0009H
C3 ret
-; Total bytes of code: 10
+; Total bytes of code: 9
```
Nice diffs:
```
benchmarks.run.Linux.x64.checked.mch:
Total bytes of delta: -5230 (-0.03 % of base)
coreclr_tests.pmi.Linux.x64.checked.mch:
Total bytes of delta: -210860 (-0.16 % of base)
libraries.crossgen2.Linux.x64.checked.mch:
Total bytes of delta: -4420 (-0.06 % of base)
libraries.pmi.Linux.x64.checked.mch:
Total bytes of delta: -25312 (-0.05 % of base)
libraries_tests.pmi.Linux.x64.checked.mch:
Total bytes of delta: -49314 (-0.04 % of base)
``` | EgorBo | 2022-03-05T17:18:25Z | 2022-03-07T23:22:14Z | 440dfe4a7beecd7755767aa247f47af00b119383 | 5635905f134a3329a15112bd4975acef3f661eb2 | JIT: Optimize movzx after setcc. Clear target reg via `xor reg, reg` instead of relying on zero-extend after SETCC which is heavier and slower
```csharp
bool Test(int x) => x == 42;
```
```diff
; Method Test(int):bool:this
G_M55728_IG01: ;; offset=0000H
G_M55728_IG02: ;; offset=0000H
+ 33C0 xor eax, eax
83FA2A cmp edx, 42
0F94C0 sete al
- 0FB6C0 movzx rax, al
G_M55728_IG03: ;; offset=0009H
C3 ret
-; Total bytes of code: 10
+; Total bytes of code: 9
```
Nice diffs:
```
benchmarks.run.Linux.x64.checked.mch:
Total bytes of delta: -5230 (-0.03 % of base)
coreclr_tests.pmi.Linux.x64.checked.mch:
Total bytes of delta: -210860 (-0.16 % of base)
libraries.crossgen2.Linux.x64.checked.mch:
Total bytes of delta: -4420 (-0.06 % of base)
libraries.pmi.Linux.x64.checked.mch:
Total bytes of delta: -25312 (-0.05 % of base)
libraries_tests.pmi.Linux.x64.checked.mch:
Total bytes of delta: -49314 (-0.04 % of base)
``` | ./src/coreclr/pal/tests/palsuite/c_runtime/vswprintf/test12/test12.cpp | // Licensed to the .NET Foundation under one or more agreements.
// The .NET Foundation licenses this file to you under the MIT license.
/*=====================================================================
**
** Source: test12.c
**
** Purpose: Test #12 for the vswprintf function.
**
**
**===================================================================*/
#include <palsuite.h>
#include "../vswprintf.h"
/* memcmp is used to verify the results, so this test is dependent on it. */
/* ditto with wcslen */
PALTEST(c_runtime_vswprintf_test12_paltest_vswprintf_test12, "c_runtime/vswprintf/test12/paltest_vswprintf_test12")
{
int neg = -42;
int pos = 0x1234ab;
INT64 l = I64(0x1234567887654321);
if (PAL_Initialize(argc, argv) != 0)
return(FAIL);
DoNumTest(convert("foo %x"), pos, convert("foo 1234ab"));
DoNumTest(convert("foo %lx"), pos, convert("foo 1234ab"));
DoNumTest(convert("foo %hx"), pos, convert("foo 34ab"));
DoNumTest(convert("foo %Lx"), pos, convert("foo 1234ab"));
DoI64NumTest(convert("foo %I64x"), l, "0x1234567887654321",
convert("foo 1234567887654321"));
DoNumTest(convert("foo %7x"), pos, convert("foo 1234ab"));
DoNumTest(convert("foo %-7x"), pos, convert("foo 1234ab "));
DoNumTest(convert("foo %.1x"), pos, convert("foo 1234ab"));
DoNumTest(convert("foo %.7x"), pos, convert("foo 01234ab"));
DoNumTest(convert("foo %07x"), pos, convert("foo 01234ab"));
DoNumTest(convert("foo %#x"), pos, convert("foo 0x1234ab"));
DoNumTest(convert("foo %+x"), pos, convert("foo 1234ab"));
DoNumTest(convert("foo % x"), pos, convert("foo 1234ab"));
DoNumTest(convert("foo %+x"), neg, convert("foo ffffffd6"));
DoNumTest(convert("foo % x"), neg, convert("foo ffffffd6"));
PAL_Terminate();
return PASS;
}
| // Licensed to the .NET Foundation under one or more agreements.
// The .NET Foundation licenses this file to you under the MIT license.
/*=====================================================================
**
** Source: test12.c
**
** Purpose: Test #12 for the vswprintf function.
**
**
**===================================================================*/
#include <palsuite.h>
#include "../vswprintf.h"
/* memcmp is used to verify the results, so this test is dependent on it. */
/* ditto with wcslen */
PALTEST(c_runtime_vswprintf_test12_paltest_vswprintf_test12, "c_runtime/vswprintf/test12/paltest_vswprintf_test12")
{
int neg = -42;
int pos = 0x1234ab;
INT64 l = I64(0x1234567887654321);
if (PAL_Initialize(argc, argv) != 0)
return(FAIL);
DoNumTest(convert("foo %x"), pos, convert("foo 1234ab"));
DoNumTest(convert("foo %lx"), pos, convert("foo 1234ab"));
DoNumTest(convert("foo %hx"), pos, convert("foo 34ab"));
DoNumTest(convert("foo %Lx"), pos, convert("foo 1234ab"));
DoI64NumTest(convert("foo %I64x"), l, "0x1234567887654321",
convert("foo 1234567887654321"));
DoNumTest(convert("foo %7x"), pos, convert("foo 1234ab"));
DoNumTest(convert("foo %-7x"), pos, convert("foo 1234ab "));
DoNumTest(convert("foo %.1x"), pos, convert("foo 1234ab"));
DoNumTest(convert("foo %.7x"), pos, convert("foo 01234ab"));
DoNumTest(convert("foo %07x"), pos, convert("foo 01234ab"));
DoNumTest(convert("foo %#x"), pos, convert("foo 0x1234ab"));
DoNumTest(convert("foo %+x"), pos, convert("foo 1234ab"));
DoNumTest(convert("foo % x"), pos, convert("foo 1234ab"));
DoNumTest(convert("foo %+x"), neg, convert("foo ffffffd6"));
DoNumTest(convert("foo % x"), neg, convert("foo ffffffd6"));
PAL_Terminate();
return PASS;
}
| -1 |
dotnet/runtime | 66,245 | JIT: Optimize movzx after setcc | Clear target reg via `xor reg, reg` instead of relying on zero-extend after SETCC which is heavier and slower
```csharp
bool Test(int x) => x == 42;
```
```diff
; Method Test(int):bool:this
G_M55728_IG01: ;; offset=0000H
G_M55728_IG02: ;; offset=0000H
+ 33C0 xor eax, eax
83FA2A cmp edx, 42
0F94C0 sete al
- 0FB6C0 movzx rax, al
G_M55728_IG03: ;; offset=0009H
C3 ret
-; Total bytes of code: 10
+; Total bytes of code: 9
```
Nice diffs:
```
benchmarks.run.Linux.x64.checked.mch:
Total bytes of delta: -5230 (-0.03 % of base)
coreclr_tests.pmi.Linux.x64.checked.mch:
Total bytes of delta: -210860 (-0.16 % of base)
libraries.crossgen2.Linux.x64.checked.mch:
Total bytes of delta: -4420 (-0.06 % of base)
libraries.pmi.Linux.x64.checked.mch:
Total bytes of delta: -25312 (-0.05 % of base)
libraries_tests.pmi.Linux.x64.checked.mch:
Total bytes of delta: -49314 (-0.04 % of base)
``` | EgorBo | 2022-03-05T17:18:25Z | 2022-03-07T23:22:14Z | 440dfe4a7beecd7755767aa247f47af00b119383 | 5635905f134a3329a15112bd4975acef3f661eb2 | JIT: Optimize movzx after setcc. Clear target reg via `xor reg, reg` instead of relying on zero-extend after SETCC which is heavier and slower
```csharp
bool Test(int x) => x == 42;
```
```diff
; Method Test(int):bool:this
G_M55728_IG01: ;; offset=0000H
G_M55728_IG02: ;; offset=0000H
+ 33C0 xor eax, eax
83FA2A cmp edx, 42
0F94C0 sete al
- 0FB6C0 movzx rax, al
G_M55728_IG03: ;; offset=0009H
C3 ret
-; Total bytes of code: 10
+; Total bytes of code: 9
```
Nice diffs:
```
benchmarks.run.Linux.x64.checked.mch:
Total bytes of delta: -5230 (-0.03 % of base)
coreclr_tests.pmi.Linux.x64.checked.mch:
Total bytes of delta: -210860 (-0.16 % of base)
libraries.crossgen2.Linux.x64.checked.mch:
Total bytes of delta: -4420 (-0.06 % of base)
libraries.pmi.Linux.x64.checked.mch:
Total bytes of delta: -25312 (-0.05 % of base)
libraries_tests.pmi.Linux.x64.checked.mch:
Total bytes of delta: -49314 (-0.04 % of base)
``` | ./src/mono/mono/utils/mono-memory-model.h | /**
* \file
* Mapping of the arch memory model.
*
* Author:
* Rodrigo Kumpera ([email protected])
*
* (C) 2011 Xamarin, Inc
*/
#ifndef _MONO_UTILS_MONO_MEMMODEL_H_
#define _MONO_UTILS_MONO_MEMMODEL_H_
#include <config.h>
#include <mono/utils/mono-membar.h>
/*
In order to allow for fast concurrent code, we must use fencing to properly order
memory access - specially on arch with weaker memory models such as ARM or PPC.
On the other hand, we can't use arm's weak model on targets such as x86 that have
a stronger model that requires much much less fencing.
The idea of exposing each arch memory model is to avoid fencing whenever possible
but at the same time make all required ordering explicit.
There are four kinds of barriers, LoadLoad, LoadStore, StoreLoad and StoreStore.
Each arch must define which ones needs fencing.
We assume 3 kinds of barriers are available: load, store and memory (load+store).
TODO: Add support for weaker forms of CAS such as present on ARM.
TODO: replace all explicit uses of memory barriers with macros from this section. This will make a nicer read of lazy init code.
TODO: if we find places where a data depencency could replace barriers, add macros here to help with it
TODO: some arch with strong consistency, such as x86, support weaker access. We might need to expose more kinds of barriers once we exploit this.
*/
/*
* Keep in sync with the enum in mini/mini-llvm-cpp.h.
*/
enum {
MONO_MEMORY_BARRIER_NONE = 0,
MONO_MEMORY_BARRIER_ACQ = 1,
MONO_MEMORY_BARRIER_REL = 2,
MONO_MEMORY_BARRIER_SEQ = 3,
};
#define MEMORY_BARRIER mono_memory_barrier ()
#define LOAD_BARRIER mono_memory_read_barrier ()
#define STORE_BARRIER mono_memory_write_barrier ()
#if defined(__i386__) || defined(_M_IX86) || defined(__x86_64__) || defined(_M_X64)
/*
Both x86 and amd64 follow the SPO memory model:
-Loads are not reordered with other loads
-Stores are not reordered with others stores
-Stores are not reordered with earlier loads
*/
/*Neither sfence or mfence provide the required semantics here*/
#define STORE_LOAD_FENCE MEMORY_BARRIER
#define LOAD_RELEASE_FENCE MEMORY_BARRIER
#define STORE_ACQUIRE_FENCE MEMORY_BARRIER
#elif defined(__arm__)
/*
ARM memory model is as weak as it can get. the only guarantee are data dependent
accesses.
LoadStore fences are much better handled using a data depencency such as:
load x; if (x = x) store y;
This trick can be applied to other fences such as LoadLoad, but require some assembly:
LDR R0, [R1]
AND R0, R0, #0
LDR R3, [R4, R0]
*/
#define STORE_STORE_FENCE STORE_BARRIER
#define LOAD_LOAD_FENCE LOAD_BARRIER
#define STORE_LOAD_FENCE MEMORY_BARRIER
#define STORE_ACQUIRE_FENCE MEMORY_BARRIER
#define STORE_RELEASE_FENCE MEMORY_BARRIER
#define LOAD_ACQUIRE_FENCE MEMORY_BARRIER
#define LOAD_RELEASE_FENCE MEMORY_BARRIER
#elif defined(__s390x__)
#define STORE_STORE_FENCE mono_compiler_barrier ()
#define LOAD_LOAD_FENCE mono_compiler_barrier ()
#define STORE_LOAD_FENCE mono_compiler_barrier ()
#define LOAD_STORE_FENCE mono_compiler_barrier ()
#define STORE_RELEASE_FENCE mono_compiler_barrier ()
#else
/*default implementation with the weakest possible memory model */
#define STORE_STORE_FENCE STORE_BARRIER
#define LOAD_LOAD_FENCE LOAD_BARRIER
#define STORE_LOAD_FENCE MEMORY_BARRIER
#define LOAD_STORE_FENCE MEMORY_BARRIER
#define STORE_ACQUIRE_FENCE MEMORY_BARRIER
#define STORE_RELEASE_FENCE MEMORY_BARRIER
#define LOAD_ACQUIRE_FENCE MEMORY_BARRIER
#define LOAD_RELEASE_FENCE MEMORY_BARRIER
#endif
#ifndef STORE_STORE_FENCE
#define STORE_STORE_FENCE mono_compiler_barrier ()
#endif
#ifndef LOAD_LOAD_FENCE
#define LOAD_LOAD_FENCE mono_compiler_barrier ()
#endif
#ifndef STORE_LOAD_FENCE
#define STORE_LOAD_FENCE mono_compiler_barrier ()
#endif
#ifndef LOAD_STORE_FENCE
#define LOAD_STORE_FENCE mono_compiler_barrier ()
#endif
#ifndef STORE_RELEASE_FENCE
#define STORE_RELEASE_FENCE mono_compiler_barrier ()
#endif
#ifndef LOAD_RELEASE_FENCE
#define LOAD_RELEASE_FENCE mono_compiler_barrier ()
#endif
#ifndef STORE_ACQUIRE_FENCE
#define STORE_ACQUIRE_FENCE mono_compiler_barrier ()
#endif
#ifndef LOAD_ACQUIRE_FENCE
#define LOAD_ACQUIRE_FENCE mono_compiler_barrier ()
#endif
/*Makes sure all previous stores as visible before */
#define mono_atomic_store_seq(target,value) do { \
STORE_STORE_FENCE; \
*(target) = (value); \
} while (0)
/*
Acquire/release semantics macros.
*/
#define mono_atomic_store_release(target,value) do { \
STORE_RELEASE_FENCE; \
*(target) = (value); \
} while (0)
#define mono_atomic_load_release(_type,target) ({ \
_type __tmp; \
LOAD_RELEASE_FENCE; \
__tmp = *(target); \
__tmp; })
#define mono_atomic_load_acquire(var,_type,target) do { \
_type __tmp = *(target); \
LOAD_ACQUIRE_FENCE; \
(var) = __tmp; \
} while (0)
#define mono_atomic_store_acquire(target,value) { \
*(target) = (value); \
STORE_ACQUIRE_FENCE; \
}
#endif /* _MONO_UTILS_MONO_MEMMODEL_H_ */
| /**
* \file
* Mapping of the arch memory model.
*
* Author:
* Rodrigo Kumpera ([email protected])
*
* (C) 2011 Xamarin, Inc
*/
#ifndef _MONO_UTILS_MONO_MEMMODEL_H_
#define _MONO_UTILS_MONO_MEMMODEL_H_
#include <config.h>
#include <mono/utils/mono-membar.h>
/*
In order to allow for fast concurrent code, we must use fencing to properly order
memory access - specially on arch with weaker memory models such as ARM or PPC.
On the other hand, we can't use arm's weak model on targets such as x86 that have
a stronger model that requires much much less fencing.
The idea of exposing each arch memory model is to avoid fencing whenever possible
but at the same time make all required ordering explicit.
There are four kinds of barriers, LoadLoad, LoadStore, StoreLoad and StoreStore.
Each arch must define which ones needs fencing.
We assume 3 kinds of barriers are available: load, store and memory (load+store).
TODO: Add support for weaker forms of CAS such as present on ARM.
TODO: replace all explicit uses of memory barriers with macros from this section. This will make a nicer read of lazy init code.
TODO: if we find places where a data depencency could replace barriers, add macros here to help with it
TODO: some arch with strong consistency, such as x86, support weaker access. We might need to expose more kinds of barriers once we exploit this.
*/
/*
* Keep in sync with the enum in mini/mini-llvm-cpp.h.
*/
enum {
MONO_MEMORY_BARRIER_NONE = 0,
MONO_MEMORY_BARRIER_ACQ = 1,
MONO_MEMORY_BARRIER_REL = 2,
MONO_MEMORY_BARRIER_SEQ = 3,
};
#define MEMORY_BARRIER mono_memory_barrier ()
#define LOAD_BARRIER mono_memory_read_barrier ()
#define STORE_BARRIER mono_memory_write_barrier ()
#if defined(__i386__) || defined(_M_IX86) || defined(__x86_64__) || defined(_M_X64)
/*
Both x86 and amd64 follow the SPO memory model:
-Loads are not reordered with other loads
-Stores are not reordered with others stores
-Stores are not reordered with earlier loads
*/
/*Neither sfence or mfence provide the required semantics here*/
#define STORE_LOAD_FENCE MEMORY_BARRIER
#define LOAD_RELEASE_FENCE MEMORY_BARRIER
#define STORE_ACQUIRE_FENCE MEMORY_BARRIER
#elif defined(__arm__)
/*
ARM memory model is as weak as it can get. the only guarantee are data dependent
accesses.
LoadStore fences are much better handled using a data depencency such as:
load x; if (x = x) store y;
This trick can be applied to other fences such as LoadLoad, but require some assembly:
LDR R0, [R1]
AND R0, R0, #0
LDR R3, [R4, R0]
*/
#define STORE_STORE_FENCE STORE_BARRIER
#define LOAD_LOAD_FENCE LOAD_BARRIER
#define STORE_LOAD_FENCE MEMORY_BARRIER
#define STORE_ACQUIRE_FENCE MEMORY_BARRIER
#define STORE_RELEASE_FENCE MEMORY_BARRIER
#define LOAD_ACQUIRE_FENCE MEMORY_BARRIER
#define LOAD_RELEASE_FENCE MEMORY_BARRIER
#elif defined(__s390x__)
#define STORE_STORE_FENCE mono_compiler_barrier ()
#define LOAD_LOAD_FENCE mono_compiler_barrier ()
#define STORE_LOAD_FENCE mono_compiler_barrier ()
#define LOAD_STORE_FENCE mono_compiler_barrier ()
#define STORE_RELEASE_FENCE mono_compiler_barrier ()
#else
/*default implementation with the weakest possible memory model */
#define STORE_STORE_FENCE STORE_BARRIER
#define LOAD_LOAD_FENCE LOAD_BARRIER
#define STORE_LOAD_FENCE MEMORY_BARRIER
#define LOAD_STORE_FENCE MEMORY_BARRIER
#define STORE_ACQUIRE_FENCE MEMORY_BARRIER
#define STORE_RELEASE_FENCE MEMORY_BARRIER
#define LOAD_ACQUIRE_FENCE MEMORY_BARRIER
#define LOAD_RELEASE_FENCE MEMORY_BARRIER
#endif
#ifndef STORE_STORE_FENCE
#define STORE_STORE_FENCE mono_compiler_barrier ()
#endif
#ifndef LOAD_LOAD_FENCE
#define LOAD_LOAD_FENCE mono_compiler_barrier ()
#endif
#ifndef STORE_LOAD_FENCE
#define STORE_LOAD_FENCE mono_compiler_barrier ()
#endif
#ifndef LOAD_STORE_FENCE
#define LOAD_STORE_FENCE mono_compiler_barrier ()
#endif
#ifndef STORE_RELEASE_FENCE
#define STORE_RELEASE_FENCE mono_compiler_barrier ()
#endif
#ifndef LOAD_RELEASE_FENCE
#define LOAD_RELEASE_FENCE mono_compiler_barrier ()
#endif
#ifndef STORE_ACQUIRE_FENCE
#define STORE_ACQUIRE_FENCE mono_compiler_barrier ()
#endif
#ifndef LOAD_ACQUIRE_FENCE
#define LOAD_ACQUIRE_FENCE mono_compiler_barrier ()
#endif
/*Makes sure all previous stores as visible before */
#define mono_atomic_store_seq(target,value) do { \
STORE_STORE_FENCE; \
*(target) = (value); \
} while (0)
/*
Acquire/release semantics macros.
*/
#define mono_atomic_store_release(target,value) do { \
STORE_RELEASE_FENCE; \
*(target) = (value); \
} while (0)
#define mono_atomic_load_release(_type,target) ({ \
_type __tmp; \
LOAD_RELEASE_FENCE; \
__tmp = *(target); \
__tmp; })
#define mono_atomic_load_acquire(var,_type,target) do { \
_type __tmp = *(target); \
LOAD_ACQUIRE_FENCE; \
(var) = __tmp; \
} while (0)
#define mono_atomic_store_acquire(target,value) { \
*(target) = (value); \
STORE_ACQUIRE_FENCE; \
}
#endif /* _MONO_UTILS_MONO_MEMMODEL_H_ */
| -1 |
dotnet/runtime | 66,245 | JIT: Optimize movzx after setcc | Clear target reg via `xor reg, reg` instead of relying on zero-extend after SETCC which is heavier and slower
```csharp
bool Test(int x) => x == 42;
```
```diff
; Method Test(int):bool:this
G_M55728_IG01: ;; offset=0000H
G_M55728_IG02: ;; offset=0000H
+ 33C0 xor eax, eax
83FA2A cmp edx, 42
0F94C0 sete al
- 0FB6C0 movzx rax, al
G_M55728_IG03: ;; offset=0009H
C3 ret
-; Total bytes of code: 10
+; Total bytes of code: 9
```
Nice diffs:
```
benchmarks.run.Linux.x64.checked.mch:
Total bytes of delta: -5230 (-0.03 % of base)
coreclr_tests.pmi.Linux.x64.checked.mch:
Total bytes of delta: -210860 (-0.16 % of base)
libraries.crossgen2.Linux.x64.checked.mch:
Total bytes of delta: -4420 (-0.06 % of base)
libraries.pmi.Linux.x64.checked.mch:
Total bytes of delta: -25312 (-0.05 % of base)
libraries_tests.pmi.Linux.x64.checked.mch:
Total bytes of delta: -49314 (-0.04 % of base)
``` | EgorBo | 2022-03-05T17:18:25Z | 2022-03-07T23:22:14Z | 440dfe4a7beecd7755767aa247f47af00b119383 | 5635905f134a3329a15112bd4975acef3f661eb2 | JIT: Optimize movzx after setcc. Clear target reg via `xor reg, reg` instead of relying on zero-extend after SETCC which is heavier and slower
```csharp
bool Test(int x) => x == 42;
```
```diff
; Method Test(int):bool:this
G_M55728_IG01: ;; offset=0000H
G_M55728_IG02: ;; offset=0000H
+ 33C0 xor eax, eax
83FA2A cmp edx, 42
0F94C0 sete al
- 0FB6C0 movzx rax, al
G_M55728_IG03: ;; offset=0009H
C3 ret
-; Total bytes of code: 10
+; Total bytes of code: 9
```
Nice diffs:
```
benchmarks.run.Linux.x64.checked.mch:
Total bytes of delta: -5230 (-0.03 % of base)
coreclr_tests.pmi.Linux.x64.checked.mch:
Total bytes of delta: -210860 (-0.16 % of base)
libraries.crossgen2.Linux.x64.checked.mch:
Total bytes of delta: -4420 (-0.06 % of base)
libraries.pmi.Linux.x64.checked.mch:
Total bytes of delta: -25312 (-0.05 % of base)
libraries_tests.pmi.Linux.x64.checked.mch:
Total bytes of delta: -49314 (-0.04 % of base)
``` | ./src/coreclr/pal/tests/palsuite/locale_info/CompareStringW/test1/test1.cpp | // Licensed to the .NET Foundation under one or more agreements.
// The .NET Foundation licenses this file to you under the MIT license.
/*============================================================================
**
** Source: test1.c
**
** Purpose: Tests that CompareStringW returns the correct value and can handle
** invalid parameters.
**
**
**==========================================================================*/
#define CSTR_LESS_THAN 1
#define CSTR_EQUAL 2
#define CSTR_GREATER_THAN 3
#include <palsuite.h>
PALTEST(locale_info_CompareStringW_test1_paltest_comparestringw_test1, "locale_info/CompareStringW/test1/paltest_comparestringw_test1")
{
WCHAR str1[] = {'f','o','o',0};
WCHAR str2[] = {'f','o','o','x',0};
WCHAR str3[] = {'f','O','o',0};
int flags = NORM_IGNORECASE | NORM_IGNOREWIDTH;
int ret;
if (0 != PAL_Initialize(argc, argv))
{
return FAIL;
}
ret = CompareStringW(0x0409, flags, str1, -1, str2, -1);
if (ret != CSTR_LESS_THAN)
{
Fail("CompareStringW with \"%S\" (%d) and \"%S\" (%d) did not return "
"CSTR_LESS_THAN!\n", str1, -1, str2, -1);
}
ret = CompareStringW(0x0409, flags, str1, -1, str2, 3);
if (ret != CSTR_EQUAL)
{
Fail("CompareStringW with \"%S\" (%d) and \"%S\" (%d) did not return "
"CSTR_EQUAL!\n", str1, -1, str2, 3);
}
ret = CompareStringW(0x0409, flags, str2, -1, str1, -1);
if (ret != CSTR_GREATER_THAN)
{
Fail("CompareStringW with \"%S\" (%d) and \"%S\" (%d) did not return "
"CSTR_GREATER_THAN!\n", str2, -1, str1, -1);
}
ret = CompareStringW(0x0409, flags, str1, -1, str3, -1);
if (ret != CSTR_EQUAL)
{
Fail("CompareStringW with \"%S\" (%d) and \"%S\" (%d) did not return "
"CSTR_EQUAL!\n", str1, -1, str3, -1);
}
ret = CompareStringW(0x0409, flags, str3, -1, str1, -1);
if (ret != CSTR_EQUAL)
{
Fail("CompareStringW with \"%S\" (%d) and \"%S\" (%d) did not return "
"CSTR_EQUAL!\n", str3, -1, str1, -1);
}
ret = CompareStringW(0x0409, flags, str3, -1, str1, -1);
if (ret != CSTR_EQUAL)
{
Fail("CompareStringW with \"%S\" (%d) and \"%S\" (%d) did not return "
"CSTR_EQUAL!\n", str3, -1, str1, -1);
}
ret = CompareStringW(0x0409, flags, str1, 0, str3, -1);
if (ret != CSTR_LESS_THAN)
{
Fail("CompareStringW with \"%S\" (%d) and \"%S\" (%d) did not return "
"CSTR_GREATER_THAN!\n", str1, 0, str3, -1);
}
ret = CompareStringW(0x0409, flags, NULL, -1, str3, -1);
if (ret != 0)
{
Fail("CompareStringW should have returned 0, got %d!\n", ret);
}
if (GetLastError() != ERROR_INVALID_PARAMETER)
{
Fail("CompareStringW should have set the last error to "
"ERROR_INVALID_PARAMETER!\n");
}
PAL_Terminate();
return PASS;
}
| // Licensed to the .NET Foundation under one or more agreements.
// The .NET Foundation licenses this file to you under the MIT license.
/*============================================================================
**
** Source: test1.c
**
** Purpose: Tests that CompareStringW returns the correct value and can handle
** invalid parameters.
**
**
**==========================================================================*/
#define CSTR_LESS_THAN 1
#define CSTR_EQUAL 2
#define CSTR_GREATER_THAN 3
#include <palsuite.h>
PALTEST(locale_info_CompareStringW_test1_paltest_comparestringw_test1, "locale_info/CompareStringW/test1/paltest_comparestringw_test1")
{
WCHAR str1[] = {'f','o','o',0};
WCHAR str2[] = {'f','o','o','x',0};
WCHAR str3[] = {'f','O','o',0};
int flags = NORM_IGNORECASE | NORM_IGNOREWIDTH;
int ret;
if (0 != PAL_Initialize(argc, argv))
{
return FAIL;
}
ret = CompareStringW(0x0409, flags, str1, -1, str2, -1);
if (ret != CSTR_LESS_THAN)
{
Fail("CompareStringW with \"%S\" (%d) and \"%S\" (%d) did not return "
"CSTR_LESS_THAN!\n", str1, -1, str2, -1);
}
ret = CompareStringW(0x0409, flags, str1, -1, str2, 3);
if (ret != CSTR_EQUAL)
{
Fail("CompareStringW with \"%S\" (%d) and \"%S\" (%d) did not return "
"CSTR_EQUAL!\n", str1, -1, str2, 3);
}
ret = CompareStringW(0x0409, flags, str2, -1, str1, -1);
if (ret != CSTR_GREATER_THAN)
{
Fail("CompareStringW with \"%S\" (%d) and \"%S\" (%d) did not return "
"CSTR_GREATER_THAN!\n", str2, -1, str1, -1);
}
ret = CompareStringW(0x0409, flags, str1, -1, str3, -1);
if (ret != CSTR_EQUAL)
{
Fail("CompareStringW with \"%S\" (%d) and \"%S\" (%d) did not return "
"CSTR_EQUAL!\n", str1, -1, str3, -1);
}
ret = CompareStringW(0x0409, flags, str3, -1, str1, -1);
if (ret != CSTR_EQUAL)
{
Fail("CompareStringW with \"%S\" (%d) and \"%S\" (%d) did not return "
"CSTR_EQUAL!\n", str3, -1, str1, -1);
}
ret = CompareStringW(0x0409, flags, str3, -1, str1, -1);
if (ret != CSTR_EQUAL)
{
Fail("CompareStringW with \"%S\" (%d) and \"%S\" (%d) did not return "
"CSTR_EQUAL!\n", str3, -1, str1, -1);
}
ret = CompareStringW(0x0409, flags, str1, 0, str3, -1);
if (ret != CSTR_LESS_THAN)
{
Fail("CompareStringW with \"%S\" (%d) and \"%S\" (%d) did not return "
"CSTR_GREATER_THAN!\n", str1, 0, str3, -1);
}
ret = CompareStringW(0x0409, flags, NULL, -1, str3, -1);
if (ret != 0)
{
Fail("CompareStringW should have returned 0, got %d!\n", ret);
}
if (GetLastError() != ERROR_INVALID_PARAMETER)
{
Fail("CompareStringW should have set the last error to "
"ERROR_INVALID_PARAMETER!\n");
}
PAL_Terminate();
return PASS;
}
| -1 |
dotnet/runtime | 66,245 | JIT: Optimize movzx after setcc | Clear target reg via `xor reg, reg` instead of relying on zero-extend after SETCC which is heavier and slower
```csharp
bool Test(int x) => x == 42;
```
```diff
; Method Test(int):bool:this
G_M55728_IG01: ;; offset=0000H
G_M55728_IG02: ;; offset=0000H
+ 33C0 xor eax, eax
83FA2A cmp edx, 42
0F94C0 sete al
- 0FB6C0 movzx rax, al
G_M55728_IG03: ;; offset=0009H
C3 ret
-; Total bytes of code: 10
+; Total bytes of code: 9
```
Nice diffs:
```
benchmarks.run.Linux.x64.checked.mch:
Total bytes of delta: -5230 (-0.03 % of base)
coreclr_tests.pmi.Linux.x64.checked.mch:
Total bytes of delta: -210860 (-0.16 % of base)
libraries.crossgen2.Linux.x64.checked.mch:
Total bytes of delta: -4420 (-0.06 % of base)
libraries.pmi.Linux.x64.checked.mch:
Total bytes of delta: -25312 (-0.05 % of base)
libraries_tests.pmi.Linux.x64.checked.mch:
Total bytes of delta: -49314 (-0.04 % of base)
``` | EgorBo | 2022-03-05T17:18:25Z | 2022-03-07T23:22:14Z | 440dfe4a7beecd7755767aa247f47af00b119383 | 5635905f134a3329a15112bd4975acef3f661eb2 | JIT: Optimize movzx after setcc. Clear target reg via `xor reg, reg` instead of relying on zero-extend after SETCC which is heavier and slower
```csharp
bool Test(int x) => x == 42;
```
```diff
; Method Test(int):bool:this
G_M55728_IG01: ;; offset=0000H
G_M55728_IG02: ;; offset=0000H
+ 33C0 xor eax, eax
83FA2A cmp edx, 42
0F94C0 sete al
- 0FB6C0 movzx rax, al
G_M55728_IG03: ;; offset=0009H
C3 ret
-; Total bytes of code: 10
+; Total bytes of code: 9
```
Nice diffs:
```
benchmarks.run.Linux.x64.checked.mch:
Total bytes of delta: -5230 (-0.03 % of base)
coreclr_tests.pmi.Linux.x64.checked.mch:
Total bytes of delta: -210860 (-0.16 % of base)
libraries.crossgen2.Linux.x64.checked.mch:
Total bytes of delta: -4420 (-0.06 % of base)
libraries.pmi.Linux.x64.checked.mch:
Total bytes of delta: -25312 (-0.05 % of base)
libraries_tests.pmi.Linux.x64.checked.mch:
Total bytes of delta: -49314 (-0.04 % of base)
``` | ./src/native/corehost/apphost/apphost.windows.cpp | // Licensed to the .NET Foundation under one or more agreements.
// The .NET Foundation licenses this file to you under the MIT license.
#include "apphost.windows.h"
#include "error_codes.h"
#include "pal.h"
#include "trace.h"
#include "utils.h"
#include <shellapi.h>
namespace
{
pal::string_t g_buffered_errors;
void __cdecl buffering_trace_writer(const pal::char_t* message)
{
// Add to buffer for later use.
g_buffered_errors.append(message).append(_X("\n"));
// Also write to stderr immediately
pal::err_fputs(message);
}
// Determines if the current module (apphost executable) is marked as a Windows GUI application
bool is_gui_application()
{
HMODULE module = ::GetModuleHandleW(nullptr);
assert(module != nullptr);
// https://docs.microsoft.com/en-us/windows/win32/debug/pe-format
BYTE *bytes = reinterpret_cast<BYTE *>(module);
UINT32 pe_header_offset = reinterpret_cast<IMAGE_DOS_HEADER *>(bytes)->e_lfanew;
UINT16 subsystem = reinterpret_cast<IMAGE_NT_HEADERS *>(bytes + pe_header_offset)->OptionalHeader.Subsystem;
return subsystem == IMAGE_SUBSYSTEM_WINDOWS_GUI;
}
void write_errors_to_event_log(const pal::char_t *executable_path, const pal::char_t *executable_name)
{
// Report errors to the Windows Event Log.
auto eventSource = ::RegisterEventSourceW(nullptr, _X(".NET Runtime"));
const DWORD traceErrorID = 1023; // Matches CoreCLR ERT_UnmanagedFailFast
pal::string_t message;
message.append(_X("Description: A .NET application failed.\n"));
message.append(_X("Application: ")).append(executable_name).append(_X("\n"));
message.append(_X("Path: ")).append(executable_path).append(_X("\n"));
message.append(_X("Message: ")).append(g_buffered_errors).append(_X("\n"));
LPCWSTR messages[] = {message.c_str()};
::ReportEventW(eventSource, EVENTLOG_ERROR_TYPE, 0, traceErrorID, nullptr, 1, 0, messages, nullptr);
::DeregisterEventSource(eventSource);
}
void show_error_dialog(const pal::char_t *executable_name, int error_code)
{
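// When DOTNET_DISABLE_GUI_ERRORS is set to 1 the dialog is skipped entirely; errors still go to stderr and the Windows Event Log.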
pal::string_t gui_errors_disabled;
if (pal::getenv(_X("DOTNET_DISABLE_GUI_ERRORS"), &gui_errors_disabled) && pal::xtoi(gui_errors_disabled.c_str()) == 1)
return;
pal::string_t dialogMsg;
pal::string_t url;
const pal::string_t url_prefix = _X(" - ") DOTNET_CORE_APPLAUNCH_URL _X("?");
if (error_code == StatusCode::CoreHostLibMissingFailure)
{
dialogMsg = pal::string_t(_X("To run this application, you must install .NET Desktop Runtime ")) + _STRINGIFY(COMMON_HOST_PKG_VER) + _X(" (") + get_arch() + _X(").\n\n");
pal::string_t line;
pal::stringstream_t ss(g_buffered_errors);
while (std::getline(ss, line, _X('\n'))) {
if (starts_with(line, url_prefix, true))
{
size_t offset = url_prefix.length() - pal::strlen(DOTNET_CORE_APPLAUNCH_URL) - 1;
url = line.substr(offset, line.length() - offset);
break;
}
}
}
else if (error_code == StatusCode::FrameworkMissingFailure)
{
// We don't have a great way of passing out different kinds of detailed error info across components, so
// just match the expected error string. See fx_resolver.messages.cpp.
dialogMsg = pal::string_t(_X("To run this application, you must install missing frameworks for .NET.\n\n"));
pal::string_t line;
pal::stringstream_t ss(g_buffered_errors);
while (std::getline(ss, line, _X('\n'))){
const pal::string_t prefix = _X("The framework '");
const pal::string_t suffix = _X("' was not found.");
const pal::string_t custom_prefix = _X(" _ ");
if (starts_with(line, prefix, true) && ends_with(line, suffix, true))
{
dialogMsg.append(line);
dialogMsg.append(_X("\n\n"));
}
else if (starts_with(line, custom_prefix, true))
{
dialogMsg.erase();
dialogMsg.append(line.substr(custom_prefix.length()));
dialogMsg.append(_X("\n\n"));
}
else if (starts_with(line, url_prefix, true))
{
size_t offset = url_prefix.length() - pal::strlen(DOTNET_CORE_APPLAUNCH_URL) - 1;
url = line.substr(offset, line.length() - offset);
break;
}
}
}
else if (error_code == StatusCode::BundleExtractionFailure)
{
pal::string_t line;
pal::stringstream_t ss(g_buffered_errors);
while (std::getline(ss, line, _X('\n'))) {
if (starts_with(line, _X("Bundle header version compatibility check failed."), true))
{
dialogMsg = pal::string_t(_X("To run this application, you must install .NET Desktop Runtime ")) + _STRINGIFY(COMMON_HOST_PKG_VER) + _X(" (") + get_arch() + _X(").\n\n");
url = get_download_url();
url.append(_X("&apphost_version="));
url.append(_STRINGIFY(COMMON_HOST_PKG_VER));
}
}
if (dialogMsg.empty())
return;
}
else
return;
dialogMsg.append(_X("Would you like to download it now?"));
assert(url.length() > 0);
assert(is_gui_application());
url.append(_X("&gui=true"));
trace::verbose(_X("Showing error dialog for application: '%s' - error code: 0x%x - url: '%s'"), executable_name, error_code, url.c_str());
if (::MessageBoxW(nullptr, dialogMsg.c_str(), executable_name, MB_ICONERROR | MB_YESNO) == IDYES)
{
// Open the URL in default browser
::ShellExecuteW(
nullptr,
_X("open"),
url.c_str(),
nullptr,
nullptr,
SW_SHOWNORMAL);
}
}
}
void apphost::buffer_errors()
{
trace::verbose(_X("Redirecting errors to custom writer."));
trace::set_error_writer(buffering_trace_writer);
}
void apphost::write_buffered_errors(int error_code)
{
if (g_buffered_errors.empty())
return;
pal::string_t executable_path;
pal::string_t executable_name;
if (pal::get_own_executable_path(&executable_path))
{
executable_name = get_filename(executable_path);
}
write_errors_to_event_log(executable_path.c_str(), executable_name.c_str());
if (is_gui_application())
show_error_dialog(executable_name.c_str(), error_code);
}
| // Licensed to the .NET Foundation under one or more agreements.
// The .NET Foundation licenses this file to you under the MIT license.
#include "apphost.windows.h"
#include "error_codes.h"
#include "pal.h"
#include "trace.h"
#include "utils.h"
#include <shellapi.h>
namespace
{
pal::string_t g_buffered_errors;
void __cdecl buffering_trace_writer(const pal::char_t* message)
{
// Add to buffer for later use.
g_buffered_errors.append(message).append(_X("\n"));
// Also write to stderr immediately
pal::err_fputs(message);
}
// Determines if the current module (apphost executable) is marked as a Windows GUI application
bool is_gui_application()
{
HMODULE module = ::GetModuleHandleW(nullptr);
assert(module != nullptr);
// https://docs.microsoft.com/en-us/windows/win32/debug/pe-format
BYTE *bytes = reinterpret_cast<BYTE *>(module);
UINT32 pe_header_offset = reinterpret_cast<IMAGE_DOS_HEADER *>(bytes)->e_lfanew;
UINT16 subsystem = reinterpret_cast<IMAGE_NT_HEADERS *>(bytes + pe_header_offset)->OptionalHeader.Subsystem;
return subsystem == IMAGE_SUBSYSTEM_WINDOWS_GUI;
}
void write_errors_to_event_log(const pal::char_t *executable_path, const pal::char_t *executable_name)
{
// Report errors to the Windows Event Log.
auto eventSource = ::RegisterEventSourceW(nullptr, _X(".NET Runtime"));
const DWORD traceErrorID = 1023; // Matches CoreCLR ERT_UnmanagedFailFast
pal::string_t message;
message.append(_X("Description: A .NET application failed.\n"));
message.append(_X("Application: ")).append(executable_name).append(_X("\n"));
message.append(_X("Path: ")).append(executable_path).append(_X("\n"));
message.append(_X("Message: ")).append(g_buffered_errors).append(_X("\n"));
LPCWSTR messages[] = {message.c_str()};
::ReportEventW(eventSource, EVENTLOG_ERROR_TYPE, 0, traceErrorID, nullptr, 1, 0, messages, nullptr);
::DeregisterEventSource(eventSource);
}
void show_error_dialog(const pal::char_t *executable_name, int error_code)
{
pal::string_t gui_errors_disabled;
if (pal::getenv(_X("DOTNET_DISABLE_GUI_ERRORS"), &gui_errors_disabled) && pal::xtoi(gui_errors_disabled.c_str()) == 1)
return;
pal::string_t dialogMsg;
pal::string_t url;
const pal::string_t url_prefix = _X(" - ") DOTNET_CORE_APPLAUNCH_URL _X("?");
if (error_code == StatusCode::CoreHostLibMissingFailure)
{
dialogMsg = pal::string_t(_X("To run this application, you must install .NET Desktop Runtime ")) + _STRINGIFY(COMMON_HOST_PKG_VER) + _X(" (") + get_arch() + _X(").\n\n");
pal::string_t line;
pal::stringstream_t ss(g_buffered_errors);
while (std::getline(ss, line, _X('\n'))) {
if (starts_with(line, url_prefix, true))
{
size_t offset = url_prefix.length() - pal::strlen(DOTNET_CORE_APPLAUNCH_URL) - 1;
url = line.substr(offset, line.length() - offset);
break;
}
}
}
else if (error_code == StatusCode::FrameworkMissingFailure)
{
// We don't have a great way of passing out different kinds of detailed error info across components, so
// just match the expected error string. See fx_resolver.messages.cpp.
dialogMsg = pal::string_t(_X("To run this application, you must install missing frameworks for .NET.\n\n"));
pal::string_t line;
pal::stringstream_t ss(g_buffered_errors);
while (std::getline(ss, line, _X('\n'))){
const pal::string_t prefix = _X("The framework '");
const pal::string_t suffix = _X("' was not found.");
const pal::string_t custom_prefix = _X(" _ ");
if (starts_with(line, prefix, true) && ends_with(line, suffix, true))
{
dialogMsg.append(line);
dialogMsg.append(_X("\n\n"));
}
else if (starts_with(line, custom_prefix, true))
{
dialogMsg.erase();
dialogMsg.append(line.substr(custom_prefix.length()));
dialogMsg.append(_X("\n\n"));
}
else if (starts_with(line, url_prefix, true))
{
size_t offset = url_prefix.length() - pal::strlen(DOTNET_CORE_APPLAUNCH_URL) - 1;
url = line.substr(offset, line.length() - offset);
break;
}
}
}
else if (error_code == StatusCode::BundleExtractionFailure)
{
pal::string_t line;
pal::stringstream_t ss(g_buffered_errors);
while (std::getline(ss, line, _X('\n'))) {
if (starts_with(line, _X("Bundle header version compatibility check failed."), true))
{
dialogMsg = pal::string_t(_X("To run this application, you must install .NET Desktop Runtime ")) + _STRINGIFY(COMMON_HOST_PKG_VER) + _X(" (") + get_arch() + _X(").\n\n");
url = get_download_url();
url.append(_X("&apphost_version="));
url.append(_STRINGIFY(COMMON_HOST_PKG_VER));
}
}
if (dialogMsg.empty())
return;
}
else
return;
dialogMsg.append(_X("Would you like to download it now?"));
assert(url.length() > 0);
assert(is_gui_application());
url.append(_X("&gui=true"));
trace::verbose(_X("Showing error dialog for application: '%s' - error code: 0x%x - url: '%s'"), executable_name, error_code, url.c_str());
if (::MessageBoxW(nullptr, dialogMsg.c_str(), executable_name, MB_ICONERROR | MB_YESNO) == IDYES)
{
// Open the URL in default browser
::ShellExecuteW(
nullptr,
_X("open"),
url.c_str(),
nullptr,
nullptr,
SW_SHOWNORMAL);
}
}
}
void apphost::buffer_errors()
{
trace::verbose(_X("Redirecting errors to custom writer."));
trace::set_error_writer(buffering_trace_writer);
}
void apphost::write_buffered_errors(int error_code)
{
if (g_buffered_errors.empty())
return;
pal::string_t executable_path;
pal::string_t executable_name;
if (pal::get_own_executable_path(&executable_path))
{
executable_name = get_filename(executable_path);
}
write_errors_to_event_log(executable_path.c_str(), executable_name.c_str());
if (is_gui_application())
show_error_dialog(executable_name.c_str(), error_code);
}
| -1 |
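As a rough illustration of the pattern described in the surrounding rows' PR text, here is a small, hypothetical C# sketch (not taken from the PR or its tests): each method materializes a bool from a comparison, which is the compare + SETCC + zero-extend shape that the peephole rewrites into an up-front `xor` of the target register.
```csharp
// Hypothetical examples only; any comparison that returns a bool produces a
// setcc into a byte register that must then be widened to a full 32-bit value.
public static class SetccSamples
{
    public static bool IsZero(long value) => value == 0;
    public static bool IsNegative(int value) => value < 0;
    public static bool FitsInByte(uint value) => value <= 255;

    public static void Main()
    {
        System.Console.WriteLine(IsZero(0));       // True
        System.Console.WriteLine(IsNegative(-1));  // True
        System.Console.WriteLine(FitsInByte(42));  // True
    }
}
```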
dotnet/runtime | 66,245 | JIT: Optimize movzx after setcc | Clear target reg via `xor reg, reg` instead of relying on zero-extend after SETCC which is heavier and slower
```csharp
bool Test(int x) => x == 42;
```
```diff
; Method Test(int):bool:this
G_M55728_IG01: ;; offset=0000H
G_M55728_IG02: ;; offset=0000H
+ 33C0 xor eax, eax
83FA2A cmp edx, 42
0F94C0 sete al
- 0FB6C0 movzx rax, al
G_M55728_IG03: ;; offset=0009H
C3 ret
-; Total bytes of code: 10
+; Total bytes of code: 9
```
Nice diffs:
```
benchmarks.run.Linux.x64.checked.mch:
Total bytes of delta: -5230 (-0.03 % of base)
coreclr_tests.pmi.Linux.x64.checked.mch:
Total bytes of delta: -210860 (-0.16 % of base)
libraries.crossgen2.Linux.x64.checked.mch:
Total bytes of delta: -4420 (-0.06 % of base)
libraries.pmi.Linux.x64.checked.mch:
Total bytes of delta: -25312 (-0.05 % of base)
libraries_tests.pmi.Linux.x64.checked.mch:
Total bytes of delta: -49314 (-0.04 % of base)
``` | EgorBo | 2022-03-05T17:18:25Z | 2022-03-07T23:22:14Z | 440dfe4a7beecd7755767aa247f47af00b119383 | 5635905f134a3329a15112bd4975acef3f661eb2 | JIT: Optimize movzx after setcc. Clear target reg via `xor reg, reg` instead of relying on zero-extend after SETCC which is heavier and slower
```csharp
bool Test(int x) => x == 42;
```
```diff
; Method Test(int):bool:this
G_M55728_IG01: ;; offset=0000H
G_M55728_IG02: ;; offset=0000H
+ 33C0 xor eax, eax
83FA2A cmp edx, 42
0F94C0 sete al
- 0FB6C0 movzx rax, al
G_M55728_IG03: ;; offset=0009H
C3 ret
-; Total bytes of code: 10
+; Total bytes of code: 9
```
Nice diffs:
```
benchmarks.run.Linux.x64.checked.mch:
Total bytes of delta: -5230 (-0.03 % of base)
coreclr_tests.pmi.Linux.x64.checked.mch:
Total bytes of delta: -210860 (-0.16 % of base)
libraries.crossgen2.Linux.x64.checked.mch:
Total bytes of delta: -4420 (-0.06 % of base)
libraries.pmi.Linux.x64.checked.mch:
Total bytes of delta: -25312 (-0.05 % of base)
libraries_tests.pmi.Linux.x64.checked.mch:
Total bytes of delta: -49314 (-0.04 % of base)
``` | ./src/coreclr/jit/hashbv.h | // Licensed to the .NET Foundation under one or more agreements.
// The .NET Foundation licenses this file to you under the MIT license.
#ifndef HASHBV_H
#define HASHBV_H
#if defined(_M_AMD64) || defined(_M_X86)
#include <xmmintrin.h>
#endif
#include <stdlib.h>
#include <stdio.h>
#include <memory.h>
#include <windows.h>
//#define TESTING 1
#define LOG2_BITS_PER_ELEMENT 5
#define LOG2_ELEMENTS_PER_NODE 2
#define LOG2_BITS_PER_NODE (LOG2_BITS_PER_ELEMENT + LOG2_ELEMENTS_PER_NODE)
#define BITS_PER_ELEMENT (1 << LOG2_BITS_PER_ELEMENT)
#define ELEMENTS_PER_NODE (1 << LOG2_ELEMENTS_PER_NODE)
#define BITS_PER_NODE (1 << LOG2_BITS_PER_NODE)
#ifdef TARGET_AMD64
typedef unsigned __int64 elemType;
typedef unsigned __int64 indexType;
#else
typedef unsigned int elemType;
typedef unsigned int indexType;
#endif
class hashBvNode;
class hashBv;
class hashBvIterator;
class hashBvGlobalData;
typedef void bitAction(indexType);
typedef void nodeAction(hashBvNode*);
typedef void dualNodeAction(hashBv* left, hashBv* right, hashBvNode* a, hashBvNode* b);
#define NOMOREBITS -1
#ifdef DEBUG
inline void pBit(indexType i)
{
printf("%d ", i);
}
#endif // DEBUG
// ------------------------------------------------------------
// this is essentially a hashtable of small fixed bitvectors.
// for any index, bits select position as follows:
// 32 0
// ------------------------------------------------------------
// | ... ... ... | hash | element in node | index in element |
// ------------------------------------------------------------
//
//
// hashBv
// | // hashtable
// v
// []->node->node->node
// []->node
// []
// []->node->node
//
//
#if TESTING
inline int log2(int number)
{
int result = 0;
number >>= 1;
while (number)
{
result++;
number >>= 1;
}
return result;
}
#endif
// return greatest power of 2 that is less than or equal to the given number (e.g. 5 -> 4, 64 -> 64)
inline int nearest_pow2(unsigned number)
{
int result = 0;
if (number > 0xffff)
{
number >>= 16;
result += 16;
}
if (number > 0xff)
{
number >>= 8;
result += 8;
}
if (number > 0xf)
{
number >>= 4;
result += 4;
}
if (number > 0x3)
{
number >>= 2;
result += 2;
}
if (number > 0x1)
{
number >>= 1;
result += 1;
}
return 1 << result;
}
class hashBvNode
{
public:
hashBvNode* next;
indexType baseIndex;
elemType elements[ELEMENTS_PER_NODE];
public:
hashBvNode(indexType base);
hashBvNode()
{
}
static hashBvNode* Create(indexType base, Compiler* comp);
void Reconstruct(indexType base);
int numElements()
{
return ELEMENTS_PER_NODE;
}
void setBit(indexType base);
void setLowest(indexType numToSet);
bool getBit(indexType base);
void clrBit(indexType base);
bool anySet();
bool belongsIn(indexType index);
int countBits();
bool anyBits();
void foreachBit(bitAction x);
void freeNode(hashBvGlobalData* glob);
bool sameAs(hashBvNode* other);
void copyFrom(hashBvNode* other);
void AndWith(hashBvNode* other);
void OrWith(hashBvNode* other);
void XorWith(hashBvNode* other);
void Subtract(hashBvNode* other);
elemType AndWithChange(hashBvNode* other);
elemType OrWithChange(hashBvNode* other);
elemType XorWithChange(hashBvNode* other);
elemType SubtractWithChange(hashBvNode* other);
bool Intersects(hashBvNode* other);
#ifdef DEBUG
void dump();
#endif // DEBUG
};
class hashBv
{
public:
// --------------------------------------
// data
// --------------------------------------
hashBvNode** nodeArr;
hashBvNode* initialVector[1];
union {
Compiler* compiler;
// for freelist
hashBv* next;
};
unsigned short log2_hashSize;
// used for heuristic resizing... could be overflowed in rare circumstances
// but should not affect correctness
unsigned short numNodes;
public:
hashBv(Compiler* comp);
static hashBv* Create(Compiler* comp);
static void Init(Compiler* comp);
static hashBv* CreateFrom(hashBv* other, Compiler* comp);
void hbvFree();
#ifdef DEBUG
void dump();
void dumpFancy();
#endif // DEBUG
__forceinline int hashtable_size()
{
return 1 << this->log2_hashSize;
}
hashBvGlobalData* globalData();
static hashBvNode*& nodeFreeList(hashBvGlobalData* globalData);
static hashBv*& hbvFreeList(hashBvGlobalData* data);
hashBvNode** getInsertionPointForIndex(indexType index);
private:
hashBvNode* getNodeForIndexHelper(indexType index, bool canAdd);
int getHashForIndex(indexType index, int table_size);
int getRehashForIndex(indexType thisIndex, int thisTableSize, int newTableSize);
// maintain free lists for vectors
hashBvNode** getNewVector(int vectorLength);
int getNodeCount();
public:
inline hashBvNode* getOrAddNodeForIndex(indexType index)
{
hashBvNode* temp = getNodeForIndexHelper(index, true);
return temp;
}
hashBvNode* getNodeForIndex(indexType index);
void removeNodeAtBase(indexType index);
public:
void setBit(indexType index);
void setAll(indexType numToSet);
bool testBit(indexType index);
void clearBit(indexType index);
int countBits();
bool anySet();
void copyFrom(hashBv* other, Compiler* comp);
void ZeroAll();
bool CompareWith(hashBv* other);
void AndWith(hashBv* other);
void OrWith(hashBv* other);
void XorWith(hashBv* other);
void Subtract(hashBv* other);
void Subtract3(hashBv* other, hashBv* other2);
void UnionMinus(hashBv* a, hashBv* b, hashBv* c);
bool AndWithChange(hashBv* other);
bool OrWithChange(hashBv* other);
bool OrWithChangeRight(hashBv* other);
bool OrWithChangeLeft(hashBv* other);
bool XorWithChange(hashBv* other);
bool SubtractWithChange(hashBv* other);
bool Intersects(hashBv* other);
template <class Action>
bool MultiTraverseLHSBigger(hashBv* other);
template <class Action>
bool MultiTraverseRHSBigger(hashBv* other);
template <class Action>
bool MultiTraverseEqual(hashBv* other);
template <class Action>
bool MultiTraverse(hashBv* other);
void InorderTraverse(nodeAction a);
void InorderTraverseTwo(hashBv* other, dualNodeAction a);
void Resize(int newSize);
void Resize();
void MergeLists(hashBvNode** a, hashBvNode** b);
bool TooSmall();
bool TooBig();
bool IsValid();
};
// --------------------------------------------------------------------
// --------------------------------------------------------------------
class hashBvIterator
{
public:
unsigned hashtable_size;
unsigned hashtable_index;
hashBv* bv;
hashBvNode* currNode;
indexType current_element;
// base index of current node
indexType current_base;
// working data of current element
elemType current_data;
hashBvIterator(hashBv* bv);
void initFrom(hashBv* bv);
hashBvIterator();
indexType nextBit();
private:
void nextNode();
};
class hashBvGlobalData
{
friend class hashBv;
friend class hashBvNode;
hashBvNode* hbvNodeFreeList;
hashBv* hbvFreeList;
};
// clang-format off
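// Typical usage (a sketch inferred from the macro body rather than copied from a call site):
//
//   indexType index;
//   FOREACH_HBV_BIT_SET(index, bv)
//   {
//       // 'index' is the position of a bit that is set in 'bv'
//   }
//   NEXT_HBV_BIT_SET;
//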
#define FOREACH_HBV_BIT_SET(index, bv) \
{ \
for (int hashNum=0; hashNum<(bv)->hashtable_size(); hashNum++) {\
hashBvNode *node = (bv)->nodeArr[hashNum];\
while (node) { \
indexType base = node->baseIndex; \
for (int el=0; el<node->numElements(); el++) {\
elemType _i = 0; \
elemType _e = node->elements[el]; \
while (_e) { \
int _result = BitScanForwardPtr((DWORD *) &_i, _e); \
assert(_result); \
(index) = base + (el*BITS_PER_ELEMENT) + _i; \
_e ^= (elemType(1) << _i);
#define NEXT_HBV_BIT_SET \
}\
}\
node = node->next; \
}\
}\
} \
// clang-format on
#ifdef DEBUG
void SimpleDumpNode(hashBvNode *n);
void DumpNode(hashBvNode *n);
void SimpleDumpDualNode(hashBv *a, hashBv *b, hashBvNode *n, hashBvNode *m);
#endif // DEBUG
#endif
| // Licensed to the .NET Foundation under one or more agreements.
// The .NET Foundation licenses this file to you under the MIT license.
#ifndef HASHBV_H
#define HASHBV_H
#if defined(_M_AMD64) || defined(_M_X86)
#include <xmmintrin.h>
#endif
#include <stdlib.h>
#include <stdio.h>
#include <memory.h>
#include <windows.h>
//#define TESTING 1
#define LOG2_BITS_PER_ELEMENT 5
#define LOG2_ELEMENTS_PER_NODE 2
#define LOG2_BITS_PER_NODE (LOG2_BITS_PER_ELEMENT + LOG2_ELEMENTS_PER_NODE)
#define BITS_PER_ELEMENT (1 << LOG2_BITS_PER_ELEMENT)
#define ELEMENTS_PER_NODE (1 << LOG2_ELEMENTS_PER_NODE)
#define BITS_PER_NODE (1 << LOG2_BITS_PER_NODE)
#ifdef TARGET_AMD64
typedef unsigned __int64 elemType;
typedef unsigned __int64 indexType;
#else
typedef unsigned int elemType;
typedef unsigned int indexType;
#endif
class hashBvNode;
class hashBv;
class hashBvIterator;
class hashBvGlobalData;
typedef void bitAction(indexType);
typedef void nodeAction(hashBvNode*);
typedef void dualNodeAction(hashBv* left, hashBv* right, hashBvNode* a, hashBvNode* b);
#define NOMOREBITS -1
#ifdef DEBUG
inline void pBit(indexType i)
{
printf("%d ", i);
}
#endif // DEBUG
// ------------------------------------------------------------
// this is essentially a hashtable of small fixed bitvectors.
// for any index, bits select position as follows:
// 32 0
// ------------------------------------------------------------
// | ... ... ... | hash | element in node | index in element |
// ------------------------------------------------------------
//
//
// hashBv
// | // hashtable
// v
// []->node->node->node
// []->node
// []
// []->node->node
//
//
#if TESTING
inline int log2(int number)
{
int result = 0;
number >>= 1;
while (number)
{
result++;
number >>= 1;
}
return result;
}
#endif
// return greatest power of 2 that is less than or equal to the given number (e.g. 5 -> 4, 64 -> 64)
inline int nearest_pow2(unsigned number)
{
int result = 0;
if (number > 0xffff)
{
number >>= 16;
result += 16;
}
if (number > 0xff)
{
number >>= 8;
result += 8;
}
if (number > 0xf)
{
number >>= 4;
result += 4;
}
if (number > 0x3)
{
number >>= 2;
result += 2;
}
if (number > 0x1)
{
number >>= 1;
result += 1;
}
return 1 << result;
}
class hashBvNode
{
public:
hashBvNode* next;
indexType baseIndex;
elemType elements[ELEMENTS_PER_NODE];
public:
hashBvNode(indexType base);
hashBvNode()
{
}
static hashBvNode* Create(indexType base, Compiler* comp);
void Reconstruct(indexType base);
int numElements()
{
return ELEMENTS_PER_NODE;
}
void setBit(indexType base);
void setLowest(indexType numToSet);
bool getBit(indexType base);
void clrBit(indexType base);
bool anySet();
bool belongsIn(indexType index);
int countBits();
bool anyBits();
void foreachBit(bitAction x);
void freeNode(hashBvGlobalData* glob);
bool sameAs(hashBvNode* other);
void copyFrom(hashBvNode* other);
void AndWith(hashBvNode* other);
void OrWith(hashBvNode* other);
void XorWith(hashBvNode* other);
void Subtract(hashBvNode* other);
elemType AndWithChange(hashBvNode* other);
elemType OrWithChange(hashBvNode* other);
elemType XorWithChange(hashBvNode* other);
elemType SubtractWithChange(hashBvNode* other);
bool Intersects(hashBvNode* other);
#ifdef DEBUG
void dump();
#endif // DEBUG
};
class hashBv
{
public:
// --------------------------------------
// data
// --------------------------------------
hashBvNode** nodeArr;
hashBvNode* initialVector[1];
union {
Compiler* compiler;
// for freelist
hashBv* next;
};
unsigned short log2_hashSize;
// used for heuristic resizing... could be overflowed in rare circumstances
// but should not affect correctness
unsigned short numNodes;
public:
hashBv(Compiler* comp);
static hashBv* Create(Compiler* comp);
static void Init(Compiler* comp);
static hashBv* CreateFrom(hashBv* other, Compiler* comp);
void hbvFree();
#ifdef DEBUG
void dump();
void dumpFancy();
#endif // DEBUG
__forceinline int hashtable_size()
{
return 1 << this->log2_hashSize;
}
hashBvGlobalData* globalData();
static hashBvNode*& nodeFreeList(hashBvGlobalData* globalData);
static hashBv*& hbvFreeList(hashBvGlobalData* data);
hashBvNode** getInsertionPointForIndex(indexType index);
private:
hashBvNode* getNodeForIndexHelper(indexType index, bool canAdd);
int getHashForIndex(indexType index, int table_size);
int getRehashForIndex(indexType thisIndex, int thisTableSize, int newTableSize);
// maintain free lists for vectors
hashBvNode** getNewVector(int vectorLength);
int getNodeCount();
public:
inline hashBvNode* getOrAddNodeForIndex(indexType index)
{
hashBvNode* temp = getNodeForIndexHelper(index, true);
return temp;
}
hashBvNode* getNodeForIndex(indexType index);
void removeNodeAtBase(indexType index);
public:
void setBit(indexType index);
void setAll(indexType numToSet);
bool testBit(indexType index);
void clearBit(indexType index);
int countBits();
bool anySet();
void copyFrom(hashBv* other, Compiler* comp);
void ZeroAll();
bool CompareWith(hashBv* other);
void AndWith(hashBv* other);
void OrWith(hashBv* other);
void XorWith(hashBv* other);
void Subtract(hashBv* other);
void Subtract3(hashBv* other, hashBv* other2);
void UnionMinus(hashBv* a, hashBv* b, hashBv* c);
bool AndWithChange(hashBv* other);
bool OrWithChange(hashBv* other);
bool OrWithChangeRight(hashBv* other);
bool OrWithChangeLeft(hashBv* other);
bool XorWithChange(hashBv* other);
bool SubtractWithChange(hashBv* other);
bool Intersects(hashBv* other);
template <class Action>
bool MultiTraverseLHSBigger(hashBv* other);
template <class Action>
bool MultiTraverseRHSBigger(hashBv* other);
template <class Action>
bool MultiTraverseEqual(hashBv* other);
template <class Action>
bool MultiTraverse(hashBv* other);
void InorderTraverse(nodeAction a);
void InorderTraverseTwo(hashBv* other, dualNodeAction a);
void Resize(int newSize);
void Resize();
void MergeLists(hashBvNode** a, hashBvNode** b);
bool TooSmall();
bool TooBig();
bool IsValid();
};
// --------------------------------------------------------------------
// --------------------------------------------------------------------
class hashBvIterator
{
public:
unsigned hashtable_size;
unsigned hashtable_index;
hashBv* bv;
hashBvNode* currNode;
indexType current_element;
// base index of current node
indexType current_base;
// working data of current element
elemType current_data;
hashBvIterator(hashBv* bv);
void initFrom(hashBv* bv);
hashBvIterator();
indexType nextBit();
private:
void nextNode();
};
class hashBvGlobalData
{
friend class hashBv;
friend class hashBvNode;
hashBvNode* hbvNodeFreeList;
hashBv* hbvFreeList;
};
// clang-format off
#define FOREACH_HBV_BIT_SET(index, bv) \
{ \
for (int hashNum=0; hashNum<(bv)->hashtable_size(); hashNum++) {\
hashBvNode *node = (bv)->nodeArr[hashNum];\
while (node) { \
indexType base = node->baseIndex; \
for (int el=0; el<node->numElements(); el++) {\
elemType _i = 0; \
elemType _e = node->elements[el]; \
while (_e) { \
int _result = BitScanForwardPtr((DWORD *) &_i, _e); \
assert(_result); \
(index) = base + (el*BITS_PER_ELEMENT) + _i; \
_e ^= (elemType(1) << _i);
#define NEXT_HBV_BIT_SET \
}\
}\
node = node->next; \
}\
}\
} \
// clang-format on
#ifdef DEBUG
void SimpleDumpNode(hashBvNode *n);
void DumpNode(hashBvNode *n);
void SimpleDumpDualNode(hashBv *a, hashBv *b, hashBvNode *n, hashBvNode *m);
#endif // DEBUG
#endif
| -1 |
dotnet/runtime | 66,245 | JIT: Optimize movzx after setcc | Clear target reg via `xor reg, reg` instead of relying on zero-extend after SETCC which is heavier and slower
```csharp
bool Test(int x) => x == 42;
```
```diff
; Method Test(int):bool:this
G_M55728_IG01: ;; offset=0000H
G_M55728_IG02: ;; offset=0000H
+ 33C0 xor eax, eax
83FA2A cmp edx, 42
0F94C0 sete al
- 0FB6C0 movzx rax, al
G_M55728_IG03: ;; offset=0009H
C3 ret
-; Total bytes of code: 10
+; Total bytes of code: 9
```
Nice diffs:
```
benchmarks.run.Linux.x64.checked.mch:
Total bytes of delta: -5230 (-0.03 % of base)
coreclr_tests.pmi.Linux.x64.checked.mch:
Total bytes of delta: -210860 (-0.16 % of base)
libraries.crossgen2.Linux.x64.checked.mch:
Total bytes of delta: -4420 (-0.06 % of base)
libraries.pmi.Linux.x64.checked.mch:
Total bytes of delta: -25312 (-0.05 % of base)
libraries_tests.pmi.Linux.x64.checked.mch:
Total bytes of delta: -49314 (-0.04 % of base)
``` | EgorBo | 2022-03-05T17:18:25Z | 2022-03-07T23:22:14Z | 440dfe4a7beecd7755767aa247f47af00b119383 | 5635905f134a3329a15112bd4975acef3f661eb2 | JIT: Optimize movzx after setcc. Clear target reg via `xor reg, reg` instead of relying on zero-extend after SETCC which is heavier and slower
```csharp
bool Test(int x) => x == 42;
```
```diff
; Method Test(int):bool:this
G_M55728_IG01: ;; offset=0000H
G_M55728_IG02: ;; offset=0000H
+ 33C0 xor eax, eax
83FA2A cmp edx, 42
0F94C0 sete al
- 0FB6C0 movzx rax, al
G_M55728_IG03: ;; offset=0009H
C3 ret
-; Total bytes of code: 10
+; Total bytes of code: 9
```
Nice diffs:
```
benchmarks.run.Linux.x64.checked.mch:
Total bytes of delta: -5230 (-0.03 % of base)
coreclr_tests.pmi.Linux.x64.checked.mch:
Total bytes of delta: -210860 (-0.16 % of base)
libraries.crossgen2.Linux.x64.checked.mch:
Total bytes of delta: -4420 (-0.06 % of base)
libraries.pmi.Linux.x64.checked.mch:
Total bytes of delta: -25312 (-0.05 % of base)
libraries_tests.pmi.Linux.x64.checked.mch:
Total bytes of delta: -49314 (-0.04 % of base)
``` | ./src/coreclr/jit/regalloc.h | // Licensed to the .NET Foundation under one or more agreements.
// The .NET Foundation licenses this file to you under the MIT license.
#ifndef REGALLOC_H_
#define REGALLOC_H_
enum FrameType
{
FT_NOT_SET,
FT_ESP_FRAME,
FT_EBP_FRAME,
#if DOUBLE_ALIGN
FT_DOUBLE_ALIGN_FRAME,
#endif
};
#if DOUBLE_ALIGN
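// Describes whether a frame cannot, can, or must be double-aligned; only defined when DOUBLE_ALIGN is enabled.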
enum CanDoubleAlign
{
CANT_DOUBLE_ALIGN,
CAN_DOUBLE_ALIGN,
MUST_DOUBLE_ALIGN,
COUNT_DOUBLE_ALIGN,
DEFAULT_DOUBLE_ALIGN = CAN_DOUBLE_ALIGN
};
#endif
#endif // REGALLOC_H_
| // Licensed to the .NET Foundation under one or more agreements.
// The .NET Foundation licenses this file to you under the MIT license.
#ifndef REGALLOC_H_
#define REGALLOC_H_
enum FrameType
{
FT_NOT_SET,
FT_ESP_FRAME,
FT_EBP_FRAME,
#if DOUBLE_ALIGN
FT_DOUBLE_ALIGN_FRAME,
#endif
};
#if DOUBLE_ALIGN
enum CanDoubleAlign
{
CANT_DOUBLE_ALIGN,
CAN_DOUBLE_ALIGN,
MUST_DOUBLE_ALIGN,
COUNT_DOUBLE_ALIGN,
DEFAULT_DOUBLE_ALIGN = CAN_DOUBLE_ALIGN
};
#endif
#endif // REGALLOC_H_
| -1 |
dotnet/runtime | 66,245 | JIT: Optimize movzx after setcc | Clear target reg via `xor reg, reg` instead of relying on zero-extend after SETCC which is heavier and slower
```csharp
bool Test(int x) => x == 42;
```
```diff
; Method Test(int):bool:this
G_M55728_IG01: ;; offset=0000H
G_M55728_IG02: ;; offset=0000H
+ 33C0 xor eax, eax
83FA2A cmp edx, 42
0F94C0 sete al
- 0FB6C0 movzx rax, al
G_M55728_IG03: ;; offset=0009H
C3 ret
-; Total bytes of code: 10
+; Total bytes of code: 9
```
Nice diffs:
```
benchmarks.run.Linux.x64.checked.mch:
Total bytes of delta: -5230 (-0.03 % of base)
coreclr_tests.pmi.Linux.x64.checked.mch:
Total bytes of delta: -210860 (-0.16 % of base)
libraries.crossgen2.Linux.x64.checked.mch:
Total bytes of delta: -4420 (-0.06 % of base)
libraries.pmi.Linux.x64.checked.mch:
Total bytes of delta: -25312 (-0.05 % of base)
libraries_tests.pmi.Linux.x64.checked.mch:
Total bytes of delta: -49314 (-0.04 % of base)
``` | EgorBo | 2022-03-05T17:18:25Z | 2022-03-07T23:22:14Z | 440dfe4a7beecd7755767aa247f47af00b119383 | 5635905f134a3329a15112bd4975acef3f661eb2 | JIT: Optimize movzx after setcc. Clear target reg via `xor reg, reg` instead of relying on zero-extend after SETCC which is heavier and slower
```csharp
bool Test(int x) => x == 42;
```
```diff
; Method Test(int):bool:this
G_M55728_IG01: ;; offset=0000H
G_M55728_IG02: ;; offset=0000H
+ 33C0 xor eax, eax
83FA2A cmp edx, 42
0F94C0 sete al
- 0FB6C0 movzx rax, al
G_M55728_IG03: ;; offset=0009H
C3 ret
-; Total bytes of code: 10
+; Total bytes of code: 9
```
Nice diffs:
```
benchmarks.run.Linux.x64.checked.mch:
Total bytes of delta: -5230 (-0.03 % of base)
coreclr_tests.pmi.Linux.x64.checked.mch:
Total bytes of delta: -210860 (-0.16 % of base)
libraries.crossgen2.Linux.x64.checked.mch:
Total bytes of delta: -4420 (-0.06 % of base)
libraries.pmi.Linux.x64.checked.mch:
Total bytes of delta: -25312 (-0.05 % of base)
libraries_tests.pmi.Linux.x64.checked.mch:
Total bytes of delta: -49314 (-0.04 % of base)
``` | ./src/coreclr/jit/typeinfo.cpp | // Licensed to the .NET Foundation under one or more agreements.
// The .NET Foundation licenses this file to you under the MIT license.
/*XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XX XX
XX typeInfo XX
XX XX
XX XX
XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
*/
#include "jitpch.h"
#ifdef _MSC_VER
#pragma hdrstop
#endif
#include "_typeinfo.h"
bool Compiler::tiCompatibleWith(const typeInfo& child, const typeInfo& parent, bool normalisedForStack) const
{
return typeInfo::tiCompatibleWith(info.compCompHnd, child, parent, normalisedForStack);
}
bool Compiler::tiMergeCompatibleWith(const typeInfo& child, const typeInfo& parent, bool normalisedForStack) const
{
return typeInfo::tiMergeCompatibleWith(info.compCompHnd, child, parent, normalisedForStack);
}
bool Compiler::tiMergeToCommonParent(typeInfo* pDest, const typeInfo* pSrc, bool* changed) const
{
return typeInfo::tiMergeToCommonParent(info.compCompHnd, pDest, pSrc, changed);
}
static bool tiCompatibleWithByRef(COMP_HANDLE CompHnd, const typeInfo& child, const typeInfo& parent)
{
assert(parent.IsByRef());
if (!child.IsByRef())
{
return false;
}
if (child.IsReadonlyByRef() && !parent.IsReadonlyByRef())
{
return false;
}
// Byrefs are compatible if the underlying types are equivalent
typeInfo childTarget = ::DereferenceByRef(child);
typeInfo parentTarget = ::DereferenceByRef(parent);
if (typeInfo::AreEquivalent(childTarget, parentTarget))
{
return true;
}
// Make sure that both types have a valid m_cls
if ((childTarget.IsType(TI_REF) || childTarget.IsType(TI_STRUCT)) &&
(parentTarget.IsType(TI_REF) || parentTarget.IsType(TI_STRUCT)))
{
return CompHnd->areTypesEquivalent(childTarget.GetClassHandle(), parentTarget.GetClassHandle());
}
return false;
}
/*****************************************************************************
* Verify child is compatible with the template parent. Basically, that
* child is a "subclass" of parent -it can be substituted for parent
* anywhere. Note that if parent contains fancy flags, such as "uninitialized"
* , "is this ptr", or "has byref local/field" info, then child must also
* contain those flags, otherwise FALSE will be returned !
*
* Rules for determining compatibility:
*
* If parent is a primitive type or value class, then child must be the
* same primitive type or value class. The exception is that the built in
* value classes System/Boolean etc. are treated as synonyms for
* TI_BYTE etc.
*
* If parent is a byref of a primitive type or value class, then child
* must be a byref of the same (rules same as above case).
*
* Byrefs are compatible only with byrefs.
*
* If parent is an object, child must be a subclass of it, implement it
* (if it is an interface), or be null.
*
* If parent is an array, child must be the same or subclassed array.
*
* If parent is a null objref, only null is compatible with it.
*
* If the "uninitialized", "by ref local/field", "this pointer" or other flags
* are different, the items are incompatible.
*
* parent CANNOT be an undefined (dead) item.
*
*/
bool typeInfo::tiCompatibleWith(COMP_HANDLE CompHnd,
const typeInfo& child,
const typeInfo& parent,
bool normalisedForStack)
{
assert(child.IsDead() || !normalisedForStack || typeInfo::AreEquivalent(::NormaliseForStack(child), child));
assert(parent.IsDead() || !normalisedForStack || typeInfo::AreEquivalent(::NormaliseForStack(parent), parent));
if (typeInfo::AreEquivalent(child, parent))
{
return true;
}
if (parent.IsUnboxedGenericTypeVar() || child.IsUnboxedGenericTypeVar())
{
return false; // need to have had child == parent
}
else if (parent.IsType(TI_REF))
{
// An uninitialized objRef is not compatible with an initialized one.
if (child.IsUninitialisedObjRef() && !parent.IsUninitialisedObjRef())
{
return false;
}
if (child.IsNullObjRef())
{ // NULL can be any reference type
return true;
}
if (!child.IsType(TI_REF))
{
return false;
}
return CompHnd->canCast(child.m_cls, parent.m_cls);
}
else if (parent.IsType(TI_METHOD))
{
if (!child.IsType(TI_METHOD))
{
return false;
}
// Right now we don't bother merging method handles
return false;
}
else if (parent.IsType(TI_STRUCT))
{
if (!child.IsType(TI_STRUCT))
{
return false;
}
// Structures are compatible if they are equivalent
return CompHnd->areTypesEquivalent(child.m_cls, parent.m_cls);
}
else if (parent.IsByRef())
{
return tiCompatibleWithByRef(CompHnd, child, parent);
}
#ifdef TARGET_64BIT
// On 64-bit targets we have precise representation for native int, so these rules
// represent the fact that the ECMA spec permits the implicit conversion
// between an int32 and a native int.
else if (parent.IsType(TI_INT) && typeInfo::AreEquivalent(nativeInt(), child))
{
return true;
}
else if (typeInfo::AreEquivalent(nativeInt(), parent) && child.IsType(TI_INT))
{
return true;
}
#endif // TARGET_64BIT
return false;
}
bool typeInfo::tiMergeCompatibleWith(COMP_HANDLE CompHnd,
const typeInfo& child,
const typeInfo& parent,
bool normalisedForStack)
{
if (!child.IsPermanentHomeByRef() && parent.IsPermanentHomeByRef())
{
return false;
}
return typeInfo::tiCompatibleWith(CompHnd, child, parent, normalisedForStack);
}
/*****************************************************************************
* Merge pDest and pSrc to find some commonality (e.g. a common parent).
* Copy the result to pDest, marking it dead if no commonality can be found.
*
* null ^ null -> null
* Object ^ null -> Object
* [I4 ^ null -> [I4
* InputStream ^ OutputStream -> Stream
* InputStream ^ NULL -> InputStream
* [I4 ^ Object -> Object
* [I4 ^ [Object -> Array
* [I4 ^ [R8 -> Array
* [Foo ^ I4 -> DEAD
* [Foo ^ [I1 -> Array
* [InputStream ^ [OutputStream -> Array
* DEAD ^ X -> DEAD
* [Intfc ^ [OutputStream -> Array
* Intf ^ [OutputStream -> Object
* [[InStream ^ [[OutStream -> Array
* [[InStream ^ [OutStream -> Array
* [[Foo ^ [Object -> Array
*
* Importantly:
* [I1 ^ [U1 -> either [I1 or [U1
* etc.
*
* Also, System/Int32 and I4 merge -> I4, etc.
*
* Returns FALSE if the merge was completely incompatible (i.e. the item became
* dead).
*
*/
bool typeInfo::tiMergeToCommonParent(COMP_HANDLE CompHnd, typeInfo* pDest, const typeInfo* pSrc, bool* changed)
{
assert(pSrc->IsDead() || typeInfo::AreEquivalent(::NormaliseForStack(*pSrc), *pSrc));
assert(pDest->IsDead() || typeInfo::AreEquivalent(::NormaliseForStack(*pDest), *pDest));
// Merge the auxiliary information like "this" pointer tracking, etc...
// Remember the pre-state, so we can tell if it changed.
*changed = false;
DWORD destFlagsBefore = pDest->m_flags;
// This bit is only set if both pDest and pSrc have it set
pDest->m_flags &= (pSrc->m_flags | ~TI_FLAG_THIS_PTR);
// This bit is set if either pDest or pSrc have it set
pDest->m_flags |= (pSrc->m_flags & TI_FLAG_UNINIT_OBJREF);
// This bit is set if either pDest or pSrc have it set
pDest->m_flags |= (pSrc->m_flags & TI_FLAG_BYREF_READONLY);
// If the byref wasn't permanent home in both sides, then merge won't have the bit set
pDest->m_flags &= (pSrc->m_flags | ~TI_FLAG_BYREF_PERMANENT_HOME);
if (pDest->m_flags != destFlagsBefore)
{
*changed = true;
}
// OK the main event. Merge the main types
if (typeInfo::AreEquivalent(*pDest, *pSrc))
{
return true;
}
if (pDest->IsUnboxedGenericTypeVar() || pSrc->IsUnboxedGenericTypeVar())
{
// Should have had *pDest == *pSrc
goto FAIL;
}
if (pDest->IsType(TI_REF))
{
if (pSrc->IsType(TI_NULL))
{ // NULL can be any reference type
return true;
}
if (!pSrc->IsType(TI_REF))
{
goto FAIL;
}
// Ask the EE to find the common parent; this always succeeds since System.Object always works
CORINFO_CLASS_HANDLE pDestClsBefore = pDest->m_cls;
pDest->m_cls = CompHnd->mergeClasses(pDest->GetClassHandle(), pSrc->GetClassHandle());
if (pDestClsBefore != pDest->m_cls)
{
*changed = true;
}
return true;
}
else if (pDest->IsType(TI_NULL))
{
if (pSrc->IsType(TI_REF)) // NULL can be any reference type
{
*pDest = *pSrc;
*changed = true;
return true;
}
goto FAIL;
}
else if (pDest->IsType(TI_STRUCT))
{
if (pSrc->IsType(TI_STRUCT) && CompHnd->areTypesEquivalent(pDest->GetClassHandle(), pSrc->GetClassHandle()))
{
return true;
}
goto FAIL;
}
else if (pDest->IsByRef())
{
return tiCompatibleWithByRef(CompHnd, *pSrc, *pDest);
}
#ifdef TARGET_64BIT
// On 64-bit targets we have precise representation for native int, so these rules
// represent the fact that the ECMA spec permits the implicit conversion
// between an int32 and a native int.
else if (typeInfo::AreEquivalent(*pDest, typeInfo::nativeInt()) && pSrc->IsType(TI_INT))
{
return true;
}
else if (typeInfo::AreEquivalent(*pSrc, typeInfo::nativeInt()) && pDest->IsType(TI_INT))
{
*pDest = *pSrc;
*changed = true;
return true;
}
#endif // TARGET_64BIT
FAIL:
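// No commonality was found: reset *pDest to the default (dead) typeInfo, as described in the function header, and report failure.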
*pDest = typeInfo();
return false;
}
#ifdef DEBUG
#if VERBOSE_VERIFY
// Utility method to have a detailed dump of a TypeInfo object
void typeInfo::Dump() const
{
char flagsStr[8];
flagsStr[0] = ((m_flags & TI_FLAG_UNINIT_OBJREF) != 0) ? 'U' : '-';
flagsStr[1] = ((m_flags & TI_FLAG_BYREF) != 0) ? 'B' : '-';
flagsStr[2] = ((m_flags & TI_FLAG_BYREF_READONLY) != 0) ? 'R' : '-';
flagsStr[3] = ((m_flags & TI_FLAG_NATIVE_INT) != 0) ? 'N' : '-';
flagsStr[4] = ((m_flags & TI_FLAG_THIS_PTR) != 0) ? 'T' : '-';
flagsStr[5] = ((m_flags & TI_FLAG_BYREF_PERMANENT_HOME) != 0) ? 'P' : '-';
flagsStr[6] = ((m_flags & TI_FLAG_GENERIC_TYPE_VAR) != 0) ? 'G' : '-';
flagsStr[7] = '\0';
printf("[%s(%X) {%s}]", tiType2Str(m_bits.type), m_cls, flagsStr);
}
#endif // VERBOSE_VERIFY
#endif // DEBUG
| // Licensed to the .NET Foundation under one or more agreements.
// The .NET Foundation licenses this file to you under the MIT license.
/*XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XX XX
XX typeInfo XX
XX XX
XX XX
XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
*/
#include "jitpch.h"
#ifdef _MSC_VER
#pragma hdrstop
#endif
#include "_typeinfo.h"
bool Compiler::tiCompatibleWith(const typeInfo& child, const typeInfo& parent, bool normalisedForStack) const
{
return typeInfo::tiCompatibleWith(info.compCompHnd, child, parent, normalisedForStack);
}
bool Compiler::tiMergeCompatibleWith(const typeInfo& child, const typeInfo& parent, bool normalisedForStack) const
{
return typeInfo::tiMergeCompatibleWith(info.compCompHnd, child, parent, normalisedForStack);
}
bool Compiler::tiMergeToCommonParent(typeInfo* pDest, const typeInfo* pSrc, bool* changed) const
{
return typeInfo::tiMergeToCommonParent(info.compCompHnd, pDest, pSrc, changed);
}
static bool tiCompatibleWithByRef(COMP_HANDLE CompHnd, const typeInfo& child, const typeInfo& parent)
{
assert(parent.IsByRef());
if (!child.IsByRef())
{
return false;
}
if (child.IsReadonlyByRef() && !parent.IsReadonlyByRef())
{
return false;
}
// Byrefs are compatible if the underlying types are equivalent
typeInfo childTarget = ::DereferenceByRef(child);
typeInfo parentTarget = ::DereferenceByRef(parent);
if (typeInfo::AreEquivalent(childTarget, parentTarget))
{
return true;
}
// Make sure that both types have a valid m_cls
if ((childTarget.IsType(TI_REF) || childTarget.IsType(TI_STRUCT)) &&
(parentTarget.IsType(TI_REF) || parentTarget.IsType(TI_STRUCT)))
{
return CompHnd->areTypesEquivalent(childTarget.GetClassHandle(), parentTarget.GetClassHandle());
}
return false;
}
/*****************************************************************************
* Verify child is compatible with the template parent. Basically, that
* child is a "subclass" of parent -it can be substituted for parent
* anywhere. Note that if parent contains fancy flags, such as "uninitialized"
* , "is this ptr", or "has byref local/field" info, then child must also
* contain those flags, otherwise FALSE will be returned !
*
* Rules for determining compatibility:
*
* If parent is a primitive type or value class, then child must be the
* same primitive type or value class. The exception is that the built in
* value classes System/Boolean etc. are treated as synonyms for
* TI_BYTE etc.
*
* If parent is a byref of a primitive type or value class, then child
* must be a byref of the same (rules same as above case).
*
* Byrefs are compatible only with byrefs.
*
* If parent is an object, child must be a subclass of it, implement it
* (if it is an interface), or be null.
*
* If parent is an array, child must be the same or subclassed array.
*
* If parent is a null objref, only null is compatible with it.
*
* If the "uninitialized", "by ref local/field", "this pointer" or other flags
* are different, the items are incompatible.
*
* parent CANNOT be an undefined (dead) item.
*
*/
bool typeInfo::tiCompatibleWith(COMP_HANDLE CompHnd,
const typeInfo& child,
const typeInfo& parent,
bool normalisedForStack)
{
assert(child.IsDead() || !normalisedForStack || typeInfo::AreEquivalent(::NormaliseForStack(child), child));
assert(parent.IsDead() || !normalisedForStack || typeInfo::AreEquivalent(::NormaliseForStack(parent), parent));
if (typeInfo::AreEquivalent(child, parent))
{
return true;
}
if (parent.IsUnboxedGenericTypeVar() || child.IsUnboxedGenericTypeVar())
{
return false; // need to have had child == parent
}
else if (parent.IsType(TI_REF))
{
// An uninitialized objRef is not compatible with an initialized one.
if (child.IsUninitialisedObjRef() && !parent.IsUninitialisedObjRef())
{
return false;
}
if (child.IsNullObjRef())
{ // NULL can be any reference type
return true;
}
if (!child.IsType(TI_REF))
{
return false;
}
return CompHnd->canCast(child.m_cls, parent.m_cls);
}
else if (parent.IsType(TI_METHOD))
{
if (!child.IsType(TI_METHOD))
{
return false;
}
// Right now we don't bother merging method handles
return false;
}
else if (parent.IsType(TI_STRUCT))
{
if (!child.IsType(TI_STRUCT))
{
return false;
}
// Structures are compatible if they are equivalent
return CompHnd->areTypesEquivalent(child.m_cls, parent.m_cls);
}
else if (parent.IsByRef())
{
return tiCompatibleWithByRef(CompHnd, child, parent);
}
#ifdef TARGET_64BIT
// On 64-bit targets we have precise representation for native int, so these rules
// represent the fact that the ECMA spec permits the implicit conversion
// between an int32 and a native int.
else if (parent.IsType(TI_INT) && typeInfo::AreEquivalent(nativeInt(), child))
{
return true;
}
else if (typeInfo::AreEquivalent(nativeInt(), parent) && child.IsType(TI_INT))
{
return true;
}
#endif // TARGET_64BIT
return false;
}
bool typeInfo::tiMergeCompatibleWith(COMP_HANDLE CompHnd,
const typeInfo& child,
const typeInfo& parent,
bool normalisedForStack)
{
if (!child.IsPermanentHomeByRef() && parent.IsPermanentHomeByRef())
{
return false;
}
return typeInfo::tiCompatibleWith(CompHnd, child, parent, normalisedForStack);
}
/*****************************************************************************
* Merge pDest and pSrc to find some commonality (e.g. a common parent).
* Copy the result to pDest, marking it dead if no commonality can be found.
*
* null ^ null -> null
* Object ^ null -> Object
* [I4 ^ null -> [I4
* InputStream ^ OutputStream -> Stream
* InputStream ^ NULL -> InputStream
* [I4 ^ Object -> Object
* [I4 ^ [Object -> Array
* [I4 ^ [R8 -> Array
* [Foo ^ I4 -> DEAD
* [Foo ^ [I1 -> Array
* [InputStream ^ [OutputStream -> Array
* DEAD ^ X -> DEAD
* [Intfc ^ [OutputStream -> Array
* Intf ^ [OutputStream -> Object
* [[InStream ^ [[OutStream -> Array
* [[InStream ^ [OutStream -> Array
* [[Foo ^ [Object -> Array
*
* Importantly:
* [I1 ^ [U1 -> either [I1 or [U1
* etc.
*
* Also, System/Int32 and I4 merge -> I4, etc.
*
* Returns FALSE if the merge was completely incompatible (i.e. the item became
* dead).
*
*/
bool typeInfo::tiMergeToCommonParent(COMP_HANDLE CompHnd, typeInfo* pDest, const typeInfo* pSrc, bool* changed)
{
assert(pSrc->IsDead() || typeInfo::AreEquivalent(::NormaliseForStack(*pSrc), *pSrc));
assert(pDest->IsDead() || typeInfo::AreEquivalent(::NormaliseForStack(*pDest), *pDest));
// Merge the auxiliary information like "this" pointer tracking, etc...
// Remember the pre-state, so we can tell if it changed.
*changed = false;
DWORD destFlagsBefore = pDest->m_flags;
// This bit is only set if both pDest and pSrc have it set
pDest->m_flags &= (pSrc->m_flags | ~TI_FLAG_THIS_PTR);
// This bit is set if either pDest or pSrc have it set
pDest->m_flags |= (pSrc->m_flags & TI_FLAG_UNINIT_OBJREF);
// This bit is set if either pDest or pSrc have it set
pDest->m_flags |= (pSrc->m_flags & TI_FLAG_BYREF_READONLY);
// If the byref wasn't permanent home in both sides, then merge won't have the bit set
pDest->m_flags &= (pSrc->m_flags | ~TI_FLAG_BYREF_PERMANENT_HOME);
if (pDest->m_flags != destFlagsBefore)
{
*changed = true;
}
// OK the main event. Merge the main types
if (typeInfo::AreEquivalent(*pDest, *pSrc))
{
return true;
}
if (pDest->IsUnboxedGenericTypeVar() || pSrc->IsUnboxedGenericTypeVar())
{
// Should have had *pDest == *pSrc
goto FAIL;
}
if (pDest->IsType(TI_REF))
{
if (pSrc->IsType(TI_NULL))
{ // NULL can be any reference type
return true;
}
if (!pSrc->IsType(TI_REF))
{
goto FAIL;
}
// Ask the EE to find the common parent; this always succeeds since System.Object always works
CORINFO_CLASS_HANDLE pDestClsBefore = pDest->m_cls;
pDest->m_cls = CompHnd->mergeClasses(pDest->GetClassHandle(), pSrc->GetClassHandle());
if (pDestClsBefore != pDest->m_cls)
{
*changed = true;
}
return true;
}
else if (pDest->IsType(TI_NULL))
{
if (pSrc->IsType(TI_REF)) // NULL can be any reference type
{
*pDest = *pSrc;
*changed = true;
return true;
}
goto FAIL;
}
else if (pDest->IsType(TI_STRUCT))
{
if (pSrc->IsType(TI_STRUCT) && CompHnd->areTypesEquivalent(pDest->GetClassHandle(), pSrc->GetClassHandle()))
{
return true;
}
goto FAIL;
}
else if (pDest->IsByRef())
{
return tiCompatibleWithByRef(CompHnd, *pSrc, *pDest);
}
#ifdef TARGET_64BIT
// On 64-bit targets we have precise representation for native int, so these rules
// represent the fact that the ECMA spec permits the implicit conversion
// between an int32 and a native int.
else if (typeInfo::AreEquivalent(*pDest, typeInfo::nativeInt()) && pSrc->IsType(TI_INT))
{
return true;
}
else if (typeInfo::AreEquivalent(*pSrc, typeInfo::nativeInt()) && pDest->IsType(TI_INT))
{
*pDest = *pSrc;
*changed = true;
return true;
}
#endif // TARGET_64BIT
FAIL:
*pDest = typeInfo();
return false;
}
#ifdef DEBUG
#if VERBOSE_VERIFY
// Utility method to have a detailed dump of a TypeInfo object
void typeInfo::Dump() const
{
char flagsStr[8];
flagsStr[0] = ((m_flags & TI_FLAG_UNINIT_OBJREF) != 0) ? 'U' : '-';
flagsStr[1] = ((m_flags & TI_FLAG_BYREF) != 0) ? 'B' : '-';
flagsStr[2] = ((m_flags & TI_FLAG_BYREF_READONLY) != 0) ? 'R' : '-';
flagsStr[3] = ((m_flags & TI_FLAG_NATIVE_INT) != 0) ? 'N' : '-';
flagsStr[4] = ((m_flags & TI_FLAG_THIS_PTR) != 0) ? 'T' : '-';
flagsStr[5] = ((m_flags & TI_FLAG_BYREF_PERMANENT_HOME) != 0) ? 'P' : '-';
flagsStr[6] = ((m_flags & TI_FLAG_GENERIC_TYPE_VAR) != 0) ? 'G' : '-';
flagsStr[7] = '\0';
printf("[%s(%X) {%s}]", tiType2Str(m_bits.type), m_cls, flagsStr);
}
#endif // VERBOSE_VERIFY
#endif // DEBUG
| -1 |
dotnet/runtime | 66,245 | JIT: Optimize movzx after setcc | Clear target reg via `xor reg, reg` instead of relying on zero-extend after SETCC which is heavier and slower
```csharp
bool Test(int x) => x == 42;
```
```diff
; Method Test(int):bool:this
G_M55728_IG01: ;; offset=0000H
G_M55728_IG02: ;; offset=0000H
+ 33C0 xor eax, eax
83FA2A cmp edx, 42
0F94C0 sete al
- 0FB6C0 movzx rax, al
G_M55728_IG03: ;; offset=0009H
C3 ret
-; Total bytes of code: 10
+; Total bytes of code: 9
```
Nice diffs:
```
benchmarks.run.Linux.x64.checked.mch:
Total bytes of delta: -5230 (-0.03 % of base)
coreclr_tests.pmi.Linux.x64.checked.mch:
Total bytes of delta: -210860 (-0.16 % of base)
libraries.crossgen2.Linux.x64.checked.mch:
Total bytes of delta: -4420 (-0.06 % of base)
libraries.pmi.Linux.x64.checked.mch:
Total bytes of delta: -25312 (-0.05 % of base)
libraries_tests.pmi.Linux.x64.checked.mch:
Total bytes of delta: -49314 (-0.04 % of base)
``` | EgorBo | 2022-03-05T17:18:25Z | 2022-03-07T23:22:14Z | 440dfe4a7beecd7755767aa247f47af00b119383 | 5635905f134a3329a15112bd4975acef3f661eb2 | JIT: Optimize movzx after setcc. Clear target reg via `xor reg, reg` instead of relying on zero-extend after SETCC which is heavier and slower
```csharp
bool Test(int x) => x == 42;
```
```diff
; Method Test(int):bool:this
G_M55728_IG01: ;; offset=0000H
G_M55728_IG02: ;; offset=0000H
+ 33C0 xor eax, eax
83FA2A cmp edx, 42
0F94C0 sete al
- 0FB6C0 movzx rax, al
G_M55728_IG03: ;; offset=0009H
C3 ret
-; Total bytes of code: 10
+; Total bytes of code: 9
```
Nice diffs:
```
benchmarks.run.Linux.x64.checked.mch:
Total bytes of delta: -5230 (-0.03 % of base)
coreclr_tests.pmi.Linux.x64.checked.mch:
Total bytes of delta: -210860 (-0.16 % of base)
libraries.crossgen2.Linux.x64.checked.mch:
Total bytes of delta: -4420 (-0.06 % of base)
libraries.pmi.Linux.x64.checked.mch:
Total bytes of delta: -25312 (-0.05 % of base)
libraries_tests.pmi.Linux.x64.checked.mch:
Total bytes of delta: -49314 (-0.04 % of base)
``` | ./src/coreclr/jit/bitsetasuint64inclass.h | // Licensed to the .NET Foundation under one or more agreements.
// The .NET Foundation licenses this file to you under the MIT license.
#ifndef bitSetAsUint64InClass_DEFINED
#define bitSetAsUint64InClass_DEFINED 1
#include "bitset.h"
#include "bitsetasuint64.h"
#include "stdmacros.h"
template <typename Env, typename BitSetTraits>
class BitSetUint64ValueRetType;
template <typename Env, typename BitSetTraits>
class BitSetUint64Iter;
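// BitSetUint64 stores a bit set in a single UINT64, so it can represent at most
// 64 elements. In DEBUG builds each instance also records the epoch it was
// created in, and CheckEpoch asserts it is not used after the epoch changes.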
template <typename Env, typename BitSetTraits>
class BitSetUint64
{
public:
typedef BitSetUint64<Env, BitSetTraits> Rep;
private:
friend class BitSetOps</*BitSetType*/ BitSetUint64<Env, BitSetTraits>,
/*Brand*/ BSUInt64Class,
/*Env*/ Env,
/*BitSetTraits*/ BitSetTraits>;
friend class BitSetUint64ValueRetType<Env, BitSetTraits>;
UINT64 m_bits;
#ifdef DEBUG
unsigned m_epoch;
#endif
typedef BitSetOps<UINT64, BSUInt64, Env, BitSetTraits> Uint64BitSetOps;
void CheckEpoch(Env env) const
{
#ifdef DEBUG
assert(m_epoch == BitSetTraits::GetEpoch(env));
#endif
}
bool operator==(const BitSetUint64& bs) const
{
return m_bits == bs.m_bits
#ifdef DEBUG
&& m_epoch == bs.m_epoch
#endif
;
}
public:
BitSetUint64& operator=(const BitSetUint64& bs)
{
m_bits = bs.m_bits;
#ifdef DEBUG
m_epoch = bs.m_epoch;
#endif // DEBUG
return (*this);
}
BitSetUint64(const BitSetUint64& bs)
: m_bits(bs.m_bits)
#ifdef DEBUG
, m_epoch(bs.m_epoch)
#endif
{
}
private:
// Return the number of bits set in the BitSet.
inline unsigned Count(Env env) const
{
CheckEpoch(env);
return Uint64BitSetOps::Count(env, m_bits);
}
inline void DiffD(Env env, const BitSetUint64& bs2)
{
CheckEpoch(env);
bs2.CheckEpoch(env);
Uint64BitSetOps::DiffD(env, m_bits, bs2.m_bits);
}
inline BitSetUint64 Diff(Env env, const BitSetUint64& bs2) const
{
CheckEpoch(env);
bs2.CheckEpoch(env);
BitSetUint64 res(*this);
Uint64BitSetOps::DiffD(env, res.m_bits, bs2.m_bits);
return res;
}
inline void RemoveElemD(Env env, unsigned i)
{
CheckEpoch(env);
Uint64BitSetOps::RemoveElemD(env, m_bits, i);
}
inline BitSetUint64 RemoveElem(Env env, unsigned i) const
{
CheckEpoch(env);
BitSetUint64 res(*this);
Uint64BitSetOps::RemoveElemD(env, res.m_bits, i);
return res;
}
inline void AddElemD(Env env, unsigned i)
{
CheckEpoch(env);
Uint64BitSetOps::AddElemD(env, m_bits, i);
}
inline BitSetUint64 AddElem(Env env, unsigned i) const
{
CheckEpoch(env);
BitSetUint64 res(*this);
Uint64BitSetOps::AddElemD(env, res.m_bits, i);
return res;
}
inline bool IsMember(Env env, unsigned i) const
{
CheckEpoch(env);
return Uint64BitSetOps::IsMember(env, m_bits, i);
}
inline void IntersectionD(Env env, const BitSetUint64& bs2)
{
CheckEpoch(env);
bs2.CheckEpoch(env);
m_bits = m_bits & bs2.m_bits;
}
inline BitSetUint64 Intersection(Env env, const BitSetUint64& bs2) const
{
CheckEpoch(env);
bs2.CheckEpoch(env);
BitSetUint64 res(*this);
Uint64BitSetOps::IntersectionD(env, res.m_bits, bs2.m_bits);
return res;
}
inline bool IsEmptyUnion(Env env, const BitSetUint64& bs2) const
{
CheckEpoch(env);
bs2.CheckEpoch(env);
return Uint64BitSetOps::IsEmptyUnion(env, m_bits, bs2.m_bits);
}
inline void UnionD(Env env, const BitSetUint64& bs2)
{
CheckEpoch(env);
bs2.CheckEpoch(env);
Uint64BitSetOps::UnionD(env, m_bits, bs2.m_bits);
}
inline BitSetUint64 Union(Env env, const BitSetUint64& bs2) const
{
CheckEpoch(env);
bs2.CheckEpoch(env);
BitSetUint64 res(*this);
Uint64BitSetOps::UnionD(env, res.m_bits, bs2.m_bits);
return res;
}
inline void ClearD(Env env)
{
assert(m_epoch == BitSetTraits::GetEpoch(env));
Uint64BitSetOps::ClearD(env, m_bits);
}
inline bool IsEmpty(Env env) const
{
CheckEpoch(env);
return Uint64BitSetOps::IsEmpty(env, m_bits);
}
inline void LivenessD(Env env, const BitSetUint64& def, const BitSetUint64& use, const BitSetUint64& out)
{
CheckEpoch(env);
def.CheckEpoch(env);
use.CheckEpoch(env);
out.CheckEpoch(env);
return Uint64BitSetOps::LivenessD(env, m_bits, def.m_bits, use.m_bits, out.m_bits);
}
inline bool IsSubset(Env env, const BitSetUint64& bs2) const
{
CheckEpoch(env);
bs2.CheckEpoch(env);
return Uint64BitSetOps::IsSubset(env, m_bits, bs2.m_bits);
}
inline bool IsEmptyIntersection(Env env, const BitSetUint64& bs2) const
{
CheckEpoch(env);
bs2.CheckEpoch(env);
return Uint64BitSetOps::IsEmptyIntersection(env, m_bits, bs2.m_bits);
}
inline bool Equal(Env env, const BitSetUint64& bs2) const
{
CheckEpoch(env);
bs2.CheckEpoch(env);
return Uint64BitSetOps::Equal(env, m_bits, bs2.m_bits);
}
const char* ToString(Env env) const
{
return Uint64BitSetOps::ToString(env, m_bits);
}
public:
// Uninitialized: default constructor; the epoch is left undefined in DEBUG builds.
BitSetUint64()
: m_bits(0)
#ifdef DEBUG
, m_epoch(UINT32_MAX) // Undefined.
#endif
{
}
BitSetUint64(Env env, bool full = false)
: m_bits(0)
#ifdef DEBUG
, m_epoch(BitSetTraits::GetEpoch(env))
#endif
{
if (full)
{
m_bits = Uint64BitSetOps::MakeFull(env);
}
}
inline BitSetUint64(const BitSetUint64ValueRetType<Env, BitSetTraits>& rt);
BitSetUint64(Env env, unsigned bitNum)
: m_bits(Uint64BitSetOps::MakeSingleton(env, bitNum))
#ifdef DEBUG
, m_epoch(BitSetTraits::GetEpoch(env))
#endif
{
assert(bitNum < BitSetTraits::GetSize(env));
}
};
template <typename Env, typename BitSetTraits>
class BitSetUint64ValueRetType
{
friend class BitSetUint64<Env, BitSetTraits>;
BitSetUint64<Env, BitSetTraits> m_bs;
public:
BitSetUint64ValueRetType(const BitSetUint64<Env, BitSetTraits>& bs) : m_bs(bs)
{
}
};
template <typename Env, typename BitSetTraits>
BitSetUint64<Env, BitSetTraits>::BitSetUint64(const BitSetUint64ValueRetType<Env, BitSetTraits>& rt)
: m_bits(rt.m_bs.m_bits)
#ifdef DEBUG
, m_epoch(rt.m_bs.m_epoch)
#endif
{
}
template <typename Env, typename BitSetTraits>
class BitSetOps</*BitSetType*/ BitSetUint64<Env, BitSetTraits>,
/*Brand*/ BSUInt64Class,
/*Env*/ Env,
/*BitSetTraits*/ BitSetTraits>
{
typedef BitSetUint64<Env, BitSetTraits> BST;
typedef const BitSetUint64<Env, BitSetTraits>& BSTValArg;
typedef BitSetUint64ValueRetType<Env, BitSetTraits> BSTRetVal;
public:
static BSTRetVal UninitVal()
{
return BitSetUint64<Env, BitSetTraits>();
}
static bool MayBeUninit(BSTValArg bs)
{
return bs == UninitVal();
}
static void Assign(Env env, BST& lhs, BSTValArg rhs)
{
lhs = rhs;
}
static void AssignNouninit(Env env, BST& lhs, BSTValArg rhs)
{
lhs = rhs;
}
static void AssignAllowUninitRhs(Env env, BST& lhs, BSTValArg rhs)
{
lhs = rhs;
}
static void AssignNoCopy(Env env, BST& lhs, BSTValArg rhs)
{
lhs = rhs;
}
static void ClearD(Env env, BST& bs)
{
bs.ClearD(env);
}
static BSTRetVal MakeSingleton(Env env, unsigned bitNum)
{
assert(bitNum < BitSetTraits::GetSize(env));
return BST(env, bitNum);
}
static BSTRetVal MakeCopy(Env env, BSTValArg bs)
{
return bs;
}
static bool IsEmpty(Env env, BSTValArg bs)
{
return bs.IsEmpty(env);
}
static unsigned Count(Env env, BSTValArg bs)
{
return bs.Count(env);
}
static bool IsEmptyUnion(Env env, BSTValArg bs1, BSTValArg bs2)
{
return bs1.IsEmptyUnion(env, bs2);
}
static void UnionD(Env env, BST& bs1, BSTValArg bs2)
{
bs1.UnionD(env, bs2);
}
static BSTRetVal Union(Env env, BSTValArg bs1, BSTValArg bs2)
{
return bs1.Union(env, bs2);
}
static void DiffD(Env env, BST& bs1, BSTValArg bs2)
{
bs1.DiffD(env, bs2);
}
static BSTRetVal Diff(Env env, BSTValArg bs1, BSTValArg bs2)
{
return bs1.Diff(env, bs2);
}
static void RemoveElemD(Env env, BST& bs1, unsigned i)
{
assert(i < BitSetTraits::GetSize(env));
bs1.RemoveElemD(env, i);
}
static BSTRetVal RemoveElem(Env env, BSTValArg bs1, unsigned i)
{
assert(i < BitSetTraits::GetSize(env));
return bs1.RemoveElem(env, i);
}
static void AddElemD(Env env, BST& bs1, unsigned i)
{
assert(i < BitSetTraits::GetSize(env));
bs1.AddElemD(env, i);
}
static BSTRetVal AddElem(Env env, BSTValArg bs1, unsigned i)
{
assert(i < BitSetTraits::GetSize(env));
return bs1.AddElem(env, i);
}
static bool IsMember(Env env, BSTValArg bs1, unsigned i)
{
assert(i < BitSetTraits::GetSize(env));
return bs1.IsMember(env, i);
}
static void IntersectionD(Env env, BST& bs1, BSTValArg bs2)
{
bs1.IntersectionD(env, bs2);
}
static BSTRetVal Intersection(Env env, BSTValArg bs1, BSTValArg bs2)
{
return bs1.Intersection(env, bs2);
}
static bool IsEmptyIntersection(Env env, BSTValArg bs1, BSTValArg bs2)
{
return bs1.IsEmptyIntersection(env, bs2);
}
static void LivenessD(Env env, BST& in, BSTValArg def, BSTValArg use, BSTValArg out)
{
in.LivenessD(env, def, use, out);
}
static bool IsSubset(Env env, BSTValArg bs1, BSTValArg bs2)
{
return bs1.IsSubset(env, bs2);
}
static bool Equal(Env env, BSTValArg bs1, BSTValArg bs2)
{
return bs1.Equal(env, bs2);
}
static bool NotEqual(Env env, BSTValArg bs1, BSTValArg bs2)
{
return !bs1.Equal(env, bs2);
}
static BSTRetVal MakeEmpty(Env env)
{
return BST(env);
}
static BSTRetVal MakeFull(Env env)
{
return BST(env, /*full*/ true);
}
#ifdef DEBUG
static const char* ToString(Env env, BSTValArg bs)
{
return bs.ToString(env);
}
#endif
// You *can* clear a bit after it's been iterated. But you shouldn't otherwise mutate the
// bitset during bit iteration.
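    // Illustrative only (a hedged sketch, not part of the original header): typical iteration,
    // assuming this BitSetOps instantiation is aliased as 'Ops' and 'env'/'bs' are valid:
    //
    //   unsigned elem;
    //   Ops::Iter iter(env, bs);
    //   while (iter.NextElem(&elem))
    //   {
    //       // 'elem' is the index of a set bit; clearing 'elem' from 'bs' here is permitted,
    //       // but any other mutation of 'bs' during the iteration is not.
    //   }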
class Iter
{
UINT64 m_bits;
unsigned m_bitNum;
public:
Iter(Env env, const BitSetUint64<Env, BitSetTraits>& bs) : m_bits(bs.m_bits), m_bitNum(0)
{
}
bool NextElem(unsigned* pElem)
{
static const unsigned UINT64_SIZE = 64;
if ((m_bits & 0x1) != 0)
{
*pElem = m_bitNum;
m_bitNum++;
m_bits >>= 1;
return true;
}
else
{
// Skip groups of 4 zeros -- an optimization for sparse bitsets.
while (m_bitNum < UINT64_SIZE && (m_bits & 0xf) == 0)
{
m_bitNum += 4;
m_bits >>= 4;
}
while (m_bitNum < UINT64_SIZE && (m_bits & 0x1) == 0)
{
m_bitNum += 1;
m_bits >>= 1;
}
if (m_bitNum < UINT64_SIZE)
{
*pElem = m_bitNum;
m_bitNum++;
m_bits >>= 1;
return true;
}
else
{
return false;
}
}
}
};
typedef const BitSetUint64<Env, BitSetTraits>& ValArgType;
typedef BitSetUint64ValueRetType<Env, BitSetTraits> RetValType;
};
#endif // bitSetAsUint64InClass_DEFINED
| // Licensed to the .NET Foundation under one or more agreements.
// The .NET Foundation licenses this file to you under the MIT license.
#ifndef bitSetAsUint64InClass_DEFINED
#define bitSetAsUint64InClass_DEFINED 1
#include "bitset.h"
#include "bitsetasuint64.h"
#include "stdmacros.h"
template <typename Env, typename BitSetTraits>
class BitSetUint64ValueRetType;
template <typename Env, typename BitSetTraits>
class BitSetUint64Iter;
template <typename Env, typename BitSetTraits>
class BitSetUint64
{
public:
typedef BitSetUint64<Env, BitSetTraits> Rep;
private:
friend class BitSetOps</*BitSetType*/ BitSetUint64<Env, BitSetTraits>,
/*Brand*/ BSUInt64Class,
/*Env*/ Env,
/*BitSetTraits*/ BitSetTraits>;
friend class BitSetUint64ValueRetType<Env, BitSetTraits>;
UINT64 m_bits;
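    // DEBUG-only bookkeeping: m_epoch records BitSetTraits::GetEpoch(env) at creation time, and
    // CheckEpoch asserts it still matches so stale bitsets are caught after the underlying
    // universe changes.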
#ifdef DEBUG
unsigned m_epoch;
#endif
typedef BitSetOps<UINT64, BSUInt64, Env, BitSetTraits> Uint64BitSetOps;
void CheckEpoch(Env env) const
{
#ifdef DEBUG
assert(m_epoch == BitSetTraits::GetEpoch(env));
#endif
}
bool operator==(const BitSetUint64& bs) const
{
return m_bits == bs.m_bits
#ifdef DEBUG
&& m_epoch == bs.m_epoch
#endif
;
}
public:
BitSetUint64& operator=(const BitSetUint64& bs)
{
m_bits = bs.m_bits;
#ifdef DEBUG
m_epoch = bs.m_epoch;
#endif // DEBUG
return (*this);
}
BitSetUint64(const BitSetUint64& bs)
: m_bits(bs.m_bits)
#ifdef DEBUG
, m_epoch(bs.m_epoch)
#endif
{
}
private:
// Return the number of bits set in the BitSet.
inline unsigned Count(Env env) const
{
CheckEpoch(env);
return Uint64BitSetOps::Count(env, m_bits);
}
inline void DiffD(Env env, const BitSetUint64& bs2)
{
CheckEpoch(env);
bs2.CheckEpoch(env);
Uint64BitSetOps::DiffD(env, m_bits, bs2.m_bits);
}
inline BitSetUint64 Diff(Env env, const BitSetUint64& bs2) const
{
CheckEpoch(env);
bs2.CheckEpoch(env);
BitSetUint64 res(*this);
Uint64BitSetOps::DiffD(env, res.m_bits, bs2.m_bits);
return res;
}
inline void RemoveElemD(Env env, unsigned i)
{
CheckEpoch(env);
Uint64BitSetOps::RemoveElemD(env, m_bits, i);
}
inline BitSetUint64 RemoveElem(Env env, unsigned i) const
{
CheckEpoch(env);
BitSetUint64 res(*this);
Uint64BitSetOps::RemoveElemD(env, res.m_bits, i);
return res;
}
inline void AddElemD(Env env, unsigned i)
{
CheckEpoch(env);
Uint64BitSetOps::AddElemD(env, m_bits, i);
}
inline BitSetUint64 AddElem(Env env, unsigned i) const
{
CheckEpoch(env);
BitSetUint64 res(*this);
Uint64BitSetOps::AddElemD(env, res.m_bits, i);
return res;
}
inline bool IsMember(Env env, unsigned i) const
{
CheckEpoch(env);
return Uint64BitSetOps::IsMember(env, m_bits, i);
}
inline void IntersectionD(Env env, const BitSetUint64& bs2)
{
CheckEpoch(env);
bs2.CheckEpoch(env);
m_bits = m_bits & bs2.m_bits;
}
inline BitSetUint64 Intersection(Env env, const BitSetUint64& bs2) const
{
CheckEpoch(env);
bs2.CheckEpoch(env);
BitSetUint64 res(*this);
Uint64BitSetOps::IntersectionD(env, res.m_bits, bs2.m_bits);
return res;
}
inline bool IsEmptyUnion(Env env, const BitSetUint64& bs2) const
{
CheckEpoch(env);
bs2.CheckEpoch(env);
return Uint64BitSetOps::IsEmptyUnion(env, m_bits, bs2.m_bits);
}
inline void UnionD(Env env, const BitSetUint64& bs2)
{
CheckEpoch(env);
bs2.CheckEpoch(env);
Uint64BitSetOps::UnionD(env, m_bits, bs2.m_bits);
}
inline BitSetUint64 Union(Env env, const BitSetUint64& bs2) const
{
CheckEpoch(env);
bs2.CheckEpoch(env);
BitSetUint64 res(*this);
Uint64BitSetOps::UnionD(env, res.m_bits, bs2.m_bits);
return res;
}
inline void ClearD(Env env)
{
assert(m_epoch == BitSetTraits::GetEpoch(env));
Uint64BitSetOps::ClearD(env, m_bits);
}
inline bool IsEmpty(Env env) const
{
CheckEpoch(env);
return Uint64BitSetOps::IsEmpty(env, m_bits);
}
inline void LivenessD(Env env, const BitSetUint64& def, const BitSetUint64& use, const BitSetUint64& out)
{
CheckEpoch(env);
def.CheckEpoch(env);
use.CheckEpoch(env);
out.CheckEpoch(env);
return Uint64BitSetOps::LivenessD(env, m_bits, def.m_bits, use.m_bits, out.m_bits);
}
inline bool IsSubset(Env env, const BitSetUint64& bs2) const
{
CheckEpoch(env);
bs2.CheckEpoch(env);
return Uint64BitSetOps::IsSubset(env, m_bits, bs2.m_bits);
}
inline bool IsEmptyIntersection(Env env, const BitSetUint64& bs2) const
{
CheckEpoch(env);
bs2.CheckEpoch(env);
return Uint64BitSetOps::IsEmptyIntersection(env, m_bits, bs2.m_bits);
}
inline bool Equal(Env env, const BitSetUint64& bs2) const
{
CheckEpoch(env);
bs2.CheckEpoch(env);
return Uint64BitSetOps::Equal(env, m_bits, bs2.m_bits);
}
const char* ToString(Env env) const
{
return Uint64BitSetOps::ToString(env, m_bits);
}
public:
    // Uninit (uninitialized value)
BitSetUint64()
: m_bits(0)
#ifdef DEBUG
, m_epoch(UINT32_MAX) // Undefined.
#endif
{
}
BitSetUint64(Env env, bool full = false)
: m_bits(0)
#ifdef DEBUG
, m_epoch(BitSetTraits::GetEpoch(env))
#endif
{
if (full)
{
m_bits = Uint64BitSetOps::MakeFull(env);
}
}
inline BitSetUint64(const BitSetUint64ValueRetType<Env, BitSetTraits>& rt);
BitSetUint64(Env env, unsigned bitNum)
: m_bits(Uint64BitSetOps::MakeSingleton(env, bitNum))
#ifdef DEBUG
, m_epoch(BitSetTraits::GetEpoch(env))
#endif
{
assert(bitNum < BitSetTraits::GetSize(env));
}
};
template <typename Env, typename BitSetTraits>
class BitSetUint64ValueRetType
{
friend class BitSetUint64<Env, BitSetTraits>;
BitSetUint64<Env, BitSetTraits> m_bs;
public:
BitSetUint64ValueRetType(const BitSetUint64<Env, BitSetTraits>& bs) : m_bs(bs)
{
}
};
template <typename Env, typename BitSetTraits>
BitSetUint64<Env, BitSetTraits>::BitSetUint64(const BitSetUint64ValueRetType<Env, BitSetTraits>& rt)
: m_bits(rt.m_bs.m_bits)
#ifdef DEBUG
, m_epoch(rt.m_bs.m_epoch)
#endif
{
}
template <typename Env, typename BitSetTraits>
class BitSetOps</*BitSetType*/ BitSetUint64<Env, BitSetTraits>,
/*Brand*/ BSUInt64Class,
/*Env*/ Env,
/*BitSetTraits*/ BitSetTraits>
{
typedef BitSetUint64<Env, BitSetTraits> BST;
typedef const BitSetUint64<Env, BitSetTraits>& BSTValArg;
typedef BitSetUint64ValueRetType<Env, BitSetTraits> BSTRetVal;
public:
static BSTRetVal UninitVal()
{
return BitSetUint64<Env, BitSetTraits>();
}
static bool MayBeUninit(BSTValArg bs)
{
return bs == UninitVal();
}
static void Assign(Env env, BST& lhs, BSTValArg rhs)
{
lhs = rhs;
}
static void AssignNouninit(Env env, BST& lhs, BSTValArg rhs)
{
lhs = rhs;
}
static void AssignAllowUninitRhs(Env env, BST& lhs, BSTValArg rhs)
{
lhs = rhs;
}
static void AssignNoCopy(Env env, BST& lhs, BSTValArg rhs)
{
lhs = rhs;
}
static void ClearD(Env env, BST& bs)
{
bs.ClearD(env);
}
static BSTRetVal MakeSingleton(Env env, unsigned bitNum)
{
assert(bitNum < BitSetTraits::GetSize(env));
return BST(env, bitNum);
}
static BSTRetVal MakeCopy(Env env, BSTValArg bs)
{
return bs;
}
static bool IsEmpty(Env env, BSTValArg bs)
{
return bs.IsEmpty(env);
}
static unsigned Count(Env env, BSTValArg bs)
{
return bs.Count(env);
}
static bool IsEmptyUnion(Env env, BSTValArg bs1, BSTValArg bs2)
{
return bs1.IsEmptyUnion(env, bs2);
}
static void UnionD(Env env, BST& bs1, BSTValArg bs2)
{
bs1.UnionD(env, bs2);
}
static BSTRetVal Union(Env env, BSTValArg bs1, BSTValArg bs2)
{
return bs1.Union(env, bs2);
}
static void DiffD(Env env, BST& bs1, BSTValArg bs2)
{
bs1.DiffD(env, bs2);
}
static BSTRetVal Diff(Env env, BSTValArg bs1, BSTValArg bs2)
{
return bs1.Diff(env, bs2);
}
static void RemoveElemD(Env env, BST& bs1, unsigned i)
{
assert(i < BitSetTraits::GetSize(env));
bs1.RemoveElemD(env, i);
}
static BSTRetVal RemoveElem(Env env, BSTValArg bs1, unsigned i)
{
assert(i < BitSetTraits::GetSize(env));
return bs1.RemoveElem(env, i);
}
static void AddElemD(Env env, BST& bs1, unsigned i)
{
assert(i < BitSetTraits::GetSize(env));
bs1.AddElemD(env, i);
}
static BSTRetVal AddElem(Env env, BSTValArg bs1, unsigned i)
{
assert(i < BitSetTraits::GetSize(env));
return bs1.AddElem(env, i);
}
static bool IsMember(Env env, BSTValArg bs1, unsigned i)
{
assert(i < BitSetTraits::GetSize(env));
return bs1.IsMember(env, i);
}
static void IntersectionD(Env env, BST& bs1, BSTValArg bs2)
{
bs1.IntersectionD(env, bs2);
}
static BSTRetVal Intersection(Env env, BSTValArg bs1, BSTValArg bs2)
{
return bs1.Intersection(env, bs2);
}
static bool IsEmptyIntersection(Env env, BSTValArg bs1, BSTValArg bs2)
{
return bs1.IsEmptyIntersection(env, bs2);
}
static void LivenessD(Env env, BST& in, BSTValArg def, BSTValArg use, BSTValArg out)
{
in.LivenessD(env, def, use, out);
}
static bool IsSubset(Env env, BSTValArg bs1, BSTValArg bs2)
{
return bs1.IsSubset(env, bs2);
}
static bool Equal(Env env, BSTValArg bs1, BSTValArg bs2)
{
return bs1.Equal(env, bs2);
}
static bool NotEqual(Env env, BSTValArg bs1, BSTValArg bs2)
{
return !bs1.Equal(env, bs2);
}
static BSTRetVal MakeEmpty(Env env)
{
return BST(env);
}
static BSTRetVal MakeFull(Env env)
{
return BST(env, /*full*/ true);
}
#ifdef DEBUG
static const char* ToString(Env env, BSTValArg bs)
{
return bs.ToString(env);
}
#endif
// You *can* clear a bit after it's been iterated. But you shouldn't otherwise mutate the
// bitset during bit iteration.
class Iter
{
UINT64 m_bits;
unsigned m_bitNum;
public:
Iter(Env env, const BitSetUint64<Env, BitSetTraits>& bs) : m_bits(bs.m_bits), m_bitNum(0)
{
}
bool NextElem(unsigned* pElem)
{
static const unsigned UINT64_SIZE = 64;
if ((m_bits & 0x1) != 0)
{
*pElem = m_bitNum;
m_bitNum++;
m_bits >>= 1;
return true;
}
else
{
// Skip groups of 4 zeros -- an optimization for sparse bitsets.
while (m_bitNum < UINT64_SIZE && (m_bits & 0xf) == 0)
{
m_bitNum += 4;
m_bits >>= 4;
}
while (m_bitNum < UINT64_SIZE && (m_bits & 0x1) == 0)
{
m_bitNum += 1;
m_bits >>= 1;
}
if (m_bitNum < UINT64_SIZE)
{
*pElem = m_bitNum;
m_bitNum++;
m_bits >>= 1;
return true;
}
else
{
return false;
}
}
}
};
typedef const BitSetUint64<Env, BitSetTraits>& ValArgType;
typedef BitSetUint64ValueRetType<Env, BitSetTraits> RetValType;
};
#endif // bitSetAsUint64InClass_DEFINED
| -1 |
dotnet/runtime | 66,245 | JIT: Optimize movzx after setcc | Clear target reg via `xor reg, reg` instead of relying on zero-extend after SETCC which is heavier and slower
```csharp
bool Test(int x) => x == 42;
```
```diff
; Method Test(int):bool:this
G_M55728_IG01: ;; offset=0000H
G_M55728_IG02: ;; offset=0000H
+ 33C0 xor eax, eax
83FA2A cmp edx, 42
0F94C0 sete al
- 0FB6C0 movzx rax, al
G_M55728_IG03: ;; offset=0009H
C3 ret
-; Total bytes of code: 10
+; Total bytes of code: 9
```
Nice diffs:
```
benchmarks.run.Linux.x64.checked.mch:
Total bytes of delta: -5230 (-0.03 % of base)
coreclr_tests.pmi.Linux.x64.checked.mch:
Total bytes of delta: -210860 (-0.16 % of base)
libraries.crossgen2.Linux.x64.checked.mch:
Total bytes of delta: -4420 (-0.06 % of base)
libraries.pmi.Linux.x64.checked.mch:
Total bytes of delta: -25312 (-0.05 % of base)
libraries_tests.pmi.Linux.x64.checked.mch:
Total bytes of delta: -49314 (-0.04 % of base)
``` | EgorBo | 2022-03-05T17:18:25Z | 2022-03-07T23:22:14Z | 440dfe4a7beecd7755767aa247f47af00b119383 | 5635905f134a3329a15112bd4975acef3f661eb2 | JIT: Optimize movzx after setcc. Clear target reg via `xor reg, reg` instead of relying on zero-extend after SETCC which is heavier and slower
```csharp
bool Test(int x) => x == 42;
```
```diff
; Method Test(int):bool:this
G_M55728_IG01: ;; offset=0000H
G_M55728_IG02: ;; offset=0000H
+ 33C0 xor eax, eax
83FA2A cmp edx, 42
0F94C0 sete al
- 0FB6C0 movzx rax, al
G_M55728_IG03: ;; offset=0009H
C3 ret
-; Total bytes of code: 10
+; Total bytes of code: 9
```
Nice diffs:
```
benchmarks.run.Linux.x64.checked.mch:
Total bytes of delta: -5230 (-0.03 % of base)
coreclr_tests.pmi.Linux.x64.checked.mch:
Total bytes of delta: -210860 (-0.16 % of base)
libraries.crossgen2.Linux.x64.checked.mch:
Total bytes of delta: -4420 (-0.06 % of base)
libraries.pmi.Linux.x64.checked.mch:
Total bytes of delta: -25312 (-0.05 % of base)
libraries_tests.pmi.Linux.x64.checked.mch:
Total bytes of delta: -49314 (-0.04 % of base)
``` | ./src/tests/Interop/PInvoke/Decimal/DecimalTestNative.cpp | // Licensed to the .NET Foundation under one or more agreements.
// The .NET Foundation licenses this file to you under the MIT license.
#include "xplatform.h"
#include <new>
struct DecimalWrapper
{
DECIMAL dec;
};
struct CurrencyWrapper
{
CURRENCY currency;
};
namespace
{
BOOL operator==(CURRENCY lhs, CURRENCY rhs)
{
return lhs.int64 == rhs.int64 ? TRUE : FALSE;
}
BOOL operator==(DECIMAL lhs, DECIMAL rhs)
{
return lhs.signscale == rhs.signscale && lhs.Hi32 == rhs.Hi32 && lhs.Lo64 == rhs.Lo64 ? TRUE : FALSE;
}
}
extern "C" DLL_EXPORT DECIMAL STDMETHODCALLTYPE CreateDecimalFromInt(int32_t i)
{
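    // Build a scale-0 DECIMAL whose 96-bit magnitude is |i| (Lo64 holds the low 64 bits, Hi32 the
    // high 32 bits) and whose sign field is 1 for negative inputs.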
DECIMAL result;
result.Hi32 = 0;
result.Lo64 = abs(i);
result.sign = i < 0 ? 1 : 0;
result.scale = 0;
result.wReserved = 0;
return result;
}
extern "C" DLL_EXPORT LPDECIMAL STDMETHODCALLTYPE CreateLPDecimalFromInt(int32_t i)
{
return new (CoreClrAlloc(sizeof(DECIMAL))) DECIMAL(CreateDecimalFromInt(i));
}
extern "C" DLL_EXPORT DecimalWrapper STDMETHODCALLTYPE CreateWrappedDecimalFromInt(int32_t i)
{
return { CreateDecimalFromInt(i) };
}
extern "C" DLL_EXPORT BOOL STDMETHODCALLTYPE DecimalEqualToInt(DECIMAL dec, int32_t i)
{
DECIMAL intDecimal = CreateDecimalFromInt(i);
return dec == intDecimal;
}
extern "C" DLL_EXPORT BOOL STDMETHODCALLTYPE LPDecimalEqualToInt(LPDECIMAL dec, int32_t i)
{
DECIMAL intDecimal = CreateDecimalFromInt(i);
return *dec == intDecimal;
}
extern "C" DLL_EXPORT BOOL STDMETHODCALLTYPE WrappedDecimalEqualToInt(DecimalWrapper dec, int32_t i)
{
DECIMAL intDecimal = CreateDecimalFromInt(i);
return dec.dec == intDecimal;
}
extern "C" DLL_EXPORT BOOL STDMETHODCALLTYPE ValidateAndChangeDecimal(DECIMAL* dec, int32_t expected, int32_t newValue)
{
BOOL result = *dec == CreateDecimalFromInt(expected);
*dec = CreateDecimalFromInt(newValue);
return result;
}
extern "C" DLL_EXPORT BOOL STDMETHODCALLTYPE ValidateAndChangeWrappedDecimal(DecimalWrapper* dec, int32_t expected, int32_t newValue)
{
BOOL result = dec->dec == CreateDecimalFromInt(expected);
dec->dec = CreateDecimalFromInt(newValue);
return result;
}
extern "C" DLL_EXPORT BOOL STDMETHODCALLTYPE ValidateAndChangeLPDecimal(LPDECIMAL* dec, int32_t expected, int32_t newValue)
{
BOOL result = **dec == CreateDecimalFromInt(expected);
*dec = new(CoreClrAlloc(sizeof(DECIMAL))) DECIMAL(CreateDecimalFromInt(newValue));
return result;
}
extern "C" DLL_EXPORT void STDMETHODCALLTYPE GetDecimalForInt(int32_t i, DECIMAL* dec)
{
*dec = CreateDecimalFromInt(i);
}
extern "C" DLL_EXPORT void STDMETHODCALLTYPE GetLPDecimalForInt(int32_t i, LPDECIMAL* dec)
{
*dec = new (CoreClrAlloc(sizeof(DECIMAL))) DECIMAL(CreateDecimalFromInt(i));
}
extern "C" DLL_EXPORT void STDMETHODCALLTYPE GetWrappedDecimalForInt(int32_t i, DecimalWrapper* dec)
{
dec->dec = CreateDecimalFromInt(i);
}
extern "C" DLL_EXPORT CURRENCY STDMETHODCALLTYPE CreateCurrencyFromInt(int32_t i)
{
CY currency;
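    // CY/CURRENCY holds a fixed-point value scaled by 10^4, so the integer i is stored as
    // i * 10000 (e.g. 5 becomes 50000, representing 5.0000).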
currency.int64 = i * 10000;
return currency;
}
extern "C" DLL_EXPORT CurrencyWrapper STDMETHODCALLTYPE CreateWrappedCurrencyFromInt(int32_t i)
{
return { CreateCurrencyFromInt(i) };
}
extern "C" DLL_EXPORT BOOL STDMETHODCALLTYPE CurrencyEqualToInt(CURRENCY currency, int32_t i)
{
CURRENCY intCurrency = CreateCurrencyFromInt(i);
return currency == intCurrency;
}
extern "C" DLL_EXPORT BOOL STDMETHODCALLTYPE WrappedCurrencyEqualToInt(CurrencyWrapper currency, int32_t i)
{
CURRENCY intCurrency = CreateCurrencyFromInt(i);
return currency.currency == intCurrency;
}
extern "C" DLL_EXPORT BOOL STDMETHODCALLTYPE ValidateAndChangeCurrency(CURRENCY* currency, int32_t expected, int32_t newValue)
{
BOOL result = *currency == CreateCurrencyFromInt(expected);
*currency = CreateCurrencyFromInt(newValue);
return result;
}
extern "C" DLL_EXPORT BOOL STDMETHODCALLTYPE ValidateAndChangeWrappedCurrency(CurrencyWrapper* currency, int32_t expected, int32_t newValue)
{
BOOL result = currency->currency == CreateCurrencyFromInt(expected);
currency->currency = CreateCurrencyFromInt(newValue);
return result;
}
extern "C" DLL_EXPORT void STDMETHODCALLTYPE GetCurrencyForInt(int32_t i, CURRENCY* currency)
{
*currency = CreateCurrencyFromInt(i);
}
extern "C" DLL_EXPORT void STDMETHODCALLTYPE GetWrappedCurrencyForInt(int32_t i, CurrencyWrapper* currency)
{
currency->currency = CreateCurrencyFromInt(i);
}
using DecimalCallback = void(STDMETHODCALLTYPE*)(DECIMAL);
extern "C" DLL_EXPORT void STDMETHODCALLTYPE PassThroughDecimalToCallback(DECIMAL dec, DecimalCallback cb)
{
cb(dec);
}
using LPDecimalCallback = void(STDMETHODCALLTYPE*)(LPDECIMAL);
extern "C" DLL_EXPORT void STDMETHODCALLTYPE PassThroughLPDecimalToCallback(LPDECIMAL dec, LPDecimalCallback cb)
{
cb(dec);
}
using CurrencyCallback = void(STDMETHODCALLTYPE*)(CURRENCY);
extern "C" DLL_EXPORT void STDMETHODCALLTYPE PassThroughCurrencyToCallback(CURRENCY cy, CurrencyCallback cb)
{
cb(cy);
}
| // Licensed to the .NET Foundation under one or more agreements.
// The .NET Foundation licenses this file to you under the MIT license.
#include "xplatform.h"
#include <new>
struct DecimalWrapper
{
DECIMAL dec;
};
struct CurrencyWrapper
{
CURRENCY currency;
};
namespace
{
BOOL operator==(CURRENCY lhs, CURRENCY rhs)
{
return lhs.int64 == rhs.int64 ? TRUE : FALSE;
}
BOOL operator==(DECIMAL lhs, DECIMAL rhs)
{
return lhs.signscale == rhs.signscale && lhs.Hi32 == rhs.Hi32 && lhs.Lo64 == rhs.Lo64 ? TRUE : FALSE;
}
}
extern "C" DLL_EXPORT DECIMAL STDMETHODCALLTYPE CreateDecimalFromInt(int32_t i)
{
DECIMAL result;
result.Hi32 = 0;
result.Lo64 = abs(i);
result.sign = i < 0 ? 1 : 0;
result.scale = 0;
result.wReserved = 0;
return result;
}
extern "C" DLL_EXPORT LPDECIMAL STDMETHODCALLTYPE CreateLPDecimalFromInt(int32_t i)
{
return new (CoreClrAlloc(sizeof(DECIMAL))) DECIMAL(CreateDecimalFromInt(i));
}
extern "C" DLL_EXPORT DecimalWrapper STDMETHODCALLTYPE CreateWrappedDecimalFromInt(int32_t i)
{
return { CreateDecimalFromInt(i) };
}
extern "C" DLL_EXPORT BOOL STDMETHODCALLTYPE DecimalEqualToInt(DECIMAL dec, int32_t i)
{
DECIMAL intDecimal = CreateDecimalFromInt(i);
return dec == intDecimal;
}
extern "C" DLL_EXPORT BOOL STDMETHODCALLTYPE LPDecimalEqualToInt(LPDECIMAL dec, int32_t i)
{
DECIMAL intDecimal = CreateDecimalFromInt(i);
return *dec == intDecimal;
}
extern "C" DLL_EXPORT BOOL STDMETHODCALLTYPE WrappedDecimalEqualToInt(DecimalWrapper dec, int32_t i)
{
DECIMAL intDecimal = CreateDecimalFromInt(i);
return dec.dec == intDecimal;
}
extern "C" DLL_EXPORT BOOL STDMETHODCALLTYPE ValidateAndChangeDecimal(DECIMAL* dec, int32_t expected, int32_t newValue)
{
BOOL result = *dec == CreateDecimalFromInt(expected);
*dec = CreateDecimalFromInt(newValue);
return result;
}
extern "C" DLL_EXPORT BOOL STDMETHODCALLTYPE ValidateAndChangeWrappedDecimal(DecimalWrapper* dec, int32_t expected, int32_t newValue)
{
BOOL result = dec->dec == CreateDecimalFromInt(expected);
dec->dec = CreateDecimalFromInt(newValue);
return result;
}
extern "C" DLL_EXPORT BOOL STDMETHODCALLTYPE ValidateAndChangeLPDecimal(LPDECIMAL* dec, int32_t expected, int32_t newValue)
{
BOOL result = **dec == CreateDecimalFromInt(expected);
*dec = new(CoreClrAlloc(sizeof(DECIMAL))) DECIMAL(CreateDecimalFromInt(newValue));
return result;
}
extern "C" DLL_EXPORT void STDMETHODCALLTYPE GetDecimalForInt(int32_t i, DECIMAL* dec)
{
*dec = CreateDecimalFromInt(i);
}
extern "C" DLL_EXPORT void STDMETHODCALLTYPE GetLPDecimalForInt(int32_t i, LPDECIMAL* dec)
{
*dec = new (CoreClrAlloc(sizeof(DECIMAL))) DECIMAL(CreateDecimalFromInt(i));
}
extern "C" DLL_EXPORT void STDMETHODCALLTYPE GetWrappedDecimalForInt(int32_t i, DecimalWrapper* dec)
{
dec->dec = CreateDecimalFromInt(i);
}
extern "C" DLL_EXPORT CURRENCY STDMETHODCALLTYPE CreateCurrencyFromInt(int32_t i)
{
CY currency;
currency.int64 = i * 10000;
return currency;
}
extern "C" DLL_EXPORT CurrencyWrapper STDMETHODCALLTYPE CreateWrappedCurrencyFromInt(int32_t i)
{
return { CreateCurrencyFromInt(i) };
}
extern "C" DLL_EXPORT BOOL STDMETHODCALLTYPE CurrencyEqualToInt(CURRENCY currency, int32_t i)
{
CURRENCY intCurrency = CreateCurrencyFromInt(i);
return currency == intCurrency;
}
extern "C" DLL_EXPORT BOOL STDMETHODCALLTYPE WrappedCurrencyEqualToInt(CurrencyWrapper currency, int32_t i)
{
CURRENCY intCurrency = CreateCurrencyFromInt(i);
return currency.currency == intCurrency;
}
extern "C" DLL_EXPORT BOOL STDMETHODCALLTYPE ValidateAndChangeCurrency(CURRENCY* currency, int32_t expected, int32_t newValue)
{
BOOL result = *currency == CreateCurrencyFromInt(expected);
*currency = CreateCurrencyFromInt(newValue);
return result;
}
extern "C" DLL_EXPORT BOOL STDMETHODCALLTYPE ValidateAndChangeWrappedCurrency(CurrencyWrapper* currency, int32_t expected, int32_t newValue)
{
BOOL result = currency->currency == CreateCurrencyFromInt(expected);
currency->currency = CreateCurrencyFromInt(newValue);
return result;
}
extern "C" DLL_EXPORT void STDMETHODCALLTYPE GetCurrencyForInt(int32_t i, CURRENCY* currency)
{
*currency = CreateCurrencyFromInt(i);
}
extern "C" DLL_EXPORT void STDMETHODCALLTYPE GetWrappedCurrencyForInt(int32_t i, CurrencyWrapper* currency)
{
currency->currency = CreateCurrencyFromInt(i);
}
using DecimalCallback = void(STDMETHODCALLTYPE*)(DECIMAL);
extern "C" DLL_EXPORT void STDMETHODCALLTYPE PassThroughDecimalToCallback(DECIMAL dec, DecimalCallback cb)
{
cb(dec);
}
using LPDecimalCallback = void(STDMETHODCALLTYPE*)(LPDECIMAL);
extern "C" DLL_EXPORT void STDMETHODCALLTYPE PassThroughLPDecimalToCallback(LPDECIMAL dec, LPDecimalCallback cb)
{
cb(dec);
}
using CurrencyCallback = void(STDMETHODCALLTYPE*)(CURRENCY);
extern "C" DLL_EXPORT void STDMETHODCALLTYPE PassThroughCurrencyToCallback(CURRENCY cy, CurrencyCallback cb)
{
cb(cy);
}
| -1 |
dotnet/runtime | 66,245 | JIT: Optimize movzx after setcc | Clear target reg via `xor reg, reg` instead of relying on zero-extend after SETCC which is heavier and slower
```csharp
bool Test(int x) => x == 42;
```
```diff
; Method Test(int):bool:this
G_M55728_IG01: ;; offset=0000H
G_M55728_IG02: ;; offset=0000H
+ 33C0 xor eax, eax
83FA2A cmp edx, 42
0F94C0 sete al
- 0FB6C0 movzx rax, al
G_M55728_IG03: ;; offset=0009H
C3 ret
-; Total bytes of code: 10
+; Total bytes of code: 9
```
Nice diffs:
```
benchmarks.run.Linux.x64.checked.mch:
Total bytes of delta: -5230 (-0.03 % of base)
coreclr_tests.pmi.Linux.x64.checked.mch:
Total bytes of delta: -210860 (-0.16 % of base)
libraries.crossgen2.Linux.x64.checked.mch:
Total bytes of delta: -4420 (-0.06 % of base)
libraries.pmi.Linux.x64.checked.mch:
Total bytes of delta: -25312 (-0.05 % of base)
libraries_tests.pmi.Linux.x64.checked.mch:
Total bytes of delta: -49314 (-0.04 % of base)
``` | EgorBo | 2022-03-05T17:18:25Z | 2022-03-07T23:22:14Z | 440dfe4a7beecd7755767aa247f47af00b119383 | 5635905f134a3329a15112bd4975acef3f661eb2 | JIT: Optimize movzx after setcc. Clear target reg via `xor reg, reg` instead of relying on zero-extend after SETCC which is heavier and slower
```csharp
bool Test(int x) => x == 42;
```
```diff
; Method Test(int):bool:this
G_M55728_IG01: ;; offset=0000H
G_M55728_IG02: ;; offset=0000H
+ 33C0 xor eax, eax
83FA2A cmp edx, 42
0F94C0 sete al
- 0FB6C0 movzx rax, al
G_M55728_IG03: ;; offset=0009H
C3 ret
-; Total bytes of code: 10
+; Total bytes of code: 9
```
Nice diffs:
```
benchmarks.run.Linux.x64.checked.mch:
Total bytes of delta: -5230 (-0.03 % of base)
coreclr_tests.pmi.Linux.x64.checked.mch:
Total bytes of delta: -210860 (-0.16 % of base)
libraries.crossgen2.Linux.x64.checked.mch:
Total bytes of delta: -4420 (-0.06 % of base)
libraries.pmi.Linux.x64.checked.mch:
Total bytes of delta: -25312 (-0.05 % of base)
libraries_tests.pmi.Linux.x64.checked.mch:
Total bytes of delta: -49314 (-0.04 % of base)
``` | ./src/coreclr/jit/error.cpp | // Licensed to the .NET Foundation under one or more agreements.
// The .NET Foundation licenses this file to you under the MIT license.
/*XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XX XX
XX error.cpp XX
XX XX
XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
*/
#include "jitpch.h"
#ifdef _MSC_VER
#pragma hdrstop
#endif
#include "compiler.h"
#if MEASURE_FATAL
unsigned fatal_badCode;
unsigned fatal_noWay;
unsigned fatal_implLimitation;
unsigned fatal_NOMEM;
unsigned fatal_noWayAssertBody;
#ifdef DEBUG
unsigned fatal_noWayAssertBodyArgs;
#endif // DEBUG
unsigned fatal_NYI;
#endif // MEASURE_FATAL
/*****************************************************************************/
void DECLSPEC_NORETURN fatal(int errCode)
{
#ifdef DEBUG
if (errCode != CORJIT_SKIPPED) // Don't stop on NYI: use COMPlus_AltJitAssertOnNYI for that.
{
if (JitConfig.DebugBreakOnVerificationFailure())
{
DebugBreak();
}
}
#endif // DEBUG
ULONG_PTR exceptArg = errCode;
RaiseException(FATAL_JIT_EXCEPTION, EXCEPTION_NONCONTINUABLE, 1, &exceptArg);
UNREACHABLE();
}
/*****************************************************************************/
void DECLSPEC_NORETURN badCode()
{
#if MEASURE_FATAL
fatal_badCode += 1;
#endif // MEASURE_FATAL
fatal(CORJIT_BADCODE);
}
/*****************************************************************************/
void DECLSPEC_NORETURN noWay()
{
#if MEASURE_FATAL
fatal_noWay += 1;
#endif // MEASURE_FATAL
fatal(CORJIT_INTERNALERROR);
}
/*****************************************************************************/
void DECLSPEC_NORETURN implLimitation()
{
#if MEASURE_FATAL
fatal_implLimitation += 1;
#endif // MEASURE_FATAL
fatal(CORJIT_IMPLLIMITATION);
}
/*****************************************************************************/
void DECLSPEC_NORETURN NOMEM()
{
#if MEASURE_FATAL
fatal_NOMEM += 1;
#endif // MEASURE_FATAL
fatal(CORJIT_OUTOFMEM);
}
/*****************************************************************************/
void DECLSPEC_NORETURN noWayAssertBody()
{
#if MEASURE_FATAL
fatal_noWayAssertBody += 1;
#endif // MEASURE_FATAL
#ifndef DEBUG
// Even in retail, if we hit a noway, and we have this variable set, we don't want to fall back
// to MinOpts, which might hide a regression. Instead, hit a breakpoint (and crash). We don't
// have the assert code to fall back on here.
// The debug path goes through this function also, to do the call to 'fatal'.
// This kind of noway is hit for unreached().
if (JitConfig.JitEnableNoWayAssert())
{
DebugBreak();
}
#endif // !DEBUG
fatal(CORJIT_RECOVERABLEERROR);
}
inline static bool ShouldThrowOnNoway(
#ifdef FEATURE_TRACELOGGING
const char* filename, unsigned line
#endif
)
{
return JitTls::GetCompiler() == nullptr ||
JitTls::GetCompiler()->compShouldThrowOnNoway(
#ifdef FEATURE_TRACELOGGING
filename, line
#endif
);
}
/*****************************************************************************/
void noWayAssertBodyConditional(
#ifdef FEATURE_TRACELOGGING
const char* filename, unsigned line
#endif
)
{
#ifdef FEATURE_TRACELOGGING
if (ShouldThrowOnNoway(filename, line))
#else
if (ShouldThrowOnNoway())
#endif // FEATURE_TRACELOGGING
{
noWayAssertBody();
}
}
/*****************************************************************************/
void notYetImplemented(const char* msg, const char* filename, unsigned line)
{
Compiler* pCompiler = JitTls::GetCompiler();
if ((pCompiler == nullptr) || (pCompiler->opts.jitFlags->IsSet(JitFlags::JIT_FLAG_ALT_JIT)))
{
NOWAY_MSG_FILE_AND_LINE(msg, filename, line);
return;
}
#if FUNC_INFO_LOGGING
#ifdef DEBUG
LogEnv* env = JitTls::GetLogEnv();
if (env != nullptr)
{
const Compiler* const pCompiler = env->compiler;
if (pCompiler->verbose)
{
printf("\n\n%s - NYI (%s:%d - %s)\n", pCompiler->info.compFullName, filename, line, msg);
}
}
if (Compiler::compJitFuncInfoFile != nullptr)
{
fprintf(Compiler::compJitFuncInfoFile, "%s - NYI (%s:%d - %s)\n",
(env == nullptr) ? "UNKNOWN" : env->compiler->info.compFullName, filename, line, msg);
fflush(Compiler::compJitFuncInfoFile);
}
#else // !DEBUG
if (Compiler::compJitFuncInfoFile != nullptr)
{
fprintf(Compiler::compJitFuncInfoFile, "NYI (%s:%d - %s)\n", filename, line, msg);
fflush(Compiler::compJitFuncInfoFile);
}
#endif // !DEBUG
#endif // FUNC_INFO_LOGGING
#ifdef DEBUG
// Assume we're within a compFunctionTrace boundary, which might not be true.
pCompiler->compFunctionTraceEnd(nullptr, 0, true);
#endif // DEBUG
DWORD value = JitConfig.AltJitAssertOnNYI();
// 0 means just silently skip
// If we are in retail builds, assume ignore
// 1 means popup the assert (abort=abort, retry=debugger, ignore=skip)
// 2 means silently don't skip (same as 3 for retail)
// 3 means popup the assert (abort=abort, retry=debugger, ignore=don't skip)
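    // That is: bit 0 set => raise the assert below; bit 1 set => do not skip (otherwise we call
    // fatal(CORJIT_SKIPPED) and the method is skipped).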
if (value & 1)
{
#ifdef DEBUG
assertAbort(msg, filename, line);
#endif
}
if ((value & 2) == 0)
{
#if MEASURE_FATAL
fatal_NYI += 1;
#endif // MEASURE_FATAL
fatal(CORJIT_SKIPPED);
}
}
/*****************************************************************************/
LONG __JITfilter(PEXCEPTION_POINTERS pExceptionPointers, LPVOID lpvParam)
{
DWORD exceptCode = pExceptionPointers->ExceptionRecord->ExceptionCode;
if (exceptCode == FATAL_JIT_EXCEPTION)
{
ErrorTrapParam* pParam = (ErrorTrapParam*)lpvParam;
assert(pExceptionPointers->ExceptionRecord->NumberParameters == 1);
pParam->errc = (int)pExceptionPointers->ExceptionRecord->ExceptionInformation[0];
ICorJitInfo* jitInfo = pParam->jitInfo;
if (jitInfo != nullptr)
{
jitInfo->reportFatalError((CorJitResult)pParam->errc);
}
return EXCEPTION_EXECUTE_HANDLER;
}
return EXCEPTION_CONTINUE_SEARCH;
}
/*****************************************************************************/
#ifdef DEBUG
DWORD getBreakOnBadCode()
{
return JitConfig.JitBreakOnBadCode();
}
/*****************************************************************************/
void debugError(const char* msg, const char* file, unsigned line)
{
const char* tail = strrchr(file, '\\');
if (tail != nullptr)
{
tail = tail + 1;
}
else
{
tail = file;
}
LogEnv* env = JitTls::GetLogEnv();
logf(LL_ERROR, "COMPILATION FAILED: file: %s:%d compiling method %s reason %s\n", tail, line,
env->compiler->info.compFullName, msg);
    // We now only assert when the user has explicitly set COMPlus_JitRequired=1.
    // If COMPlus_JitRequired is 0 or not set, we will not assert.
if (JitConfig.JitRequired() == 1 || getBreakOnBadCode())
{
assertAbort(msg, file, line);
}
BreakIfDebuggerPresent();
}
/*****************************************************************************/
LogEnv::LogEnv(ICorJitInfo* aCompHnd) : compHnd(aCompHnd), compiler(nullptr)
{
}
/*****************************************************************************/
extern "C" void __cdecl assertAbort(const char* why, const char* file, unsigned line)
{
const char* msg = why;
LogEnv* env = JitTls::GetLogEnv();
const int BUFF_SIZE = 8192;
char* buff = (char*)_alloca(BUFF_SIZE);
const char* phaseName = "unknown phase";
if (env->compiler)
{
phaseName = PhaseNames[env->compiler->mostRecentlyActivePhase];
_snprintf_s(buff, BUFF_SIZE, _TRUNCATE,
"Assertion failed '%s' in '%s' during '%s' (IL size %d; hash 0x%08x; %s)\n", why,
env->compiler->info.compFullName, phaseName, env->compiler->info.compILCodeSize,
env->compiler->info.compMethodHash(), env->compiler->compGetTieringName(/* short name */ true));
msg = buff;
}
printf(""); // null string means flush
#if FUNC_INFO_LOGGING
if (Compiler::compJitFuncInfoFile != nullptr)
{
fprintf(Compiler::compJitFuncInfoFile, "%s - Assertion failed (%s:%d - %s) during %s\n",
(env == nullptr) ? "UNKNOWN" : env->compiler->info.compFullName, file, line, why, phaseName);
}
#endif // FUNC_INFO_LOGGING
if (env->compHnd->doAssert(file, line, msg))
{
DebugBreak();
}
Compiler* comp = JitTls::GetCompiler();
if (comp != nullptr && comp->opts.jitFlags->IsSet(JitFlags::JIT_FLAG_ALT_JIT))
{
// If we hit an assert, and we got here, it's either because the user hit "ignore" on the
// dialog pop-up, or they set COMPlus_ContinueOnAssert=1 to not emit a pop-up, but just continue.
// If we're an altjit, we have two options: (1) silently continue, as a normal JIT would, probably
// leading to additional asserts, or (2) tell the VM that the AltJit wants to skip this function,
        // thus falling back to the fallback JIT. Setting COMPlus_AltJitSkipOnAssert=1 chooses this
        // "skip to the fallback JIT" behavior. This is useful when doing ASM diffs, where we only want to see
// the first assert for any function, but we don't want to kill the whole ngen process on the
// first assert (which would happen if you used COMPlus_NoGuiOnAssert=1 for example).
if (JitConfig.AltJitSkipOnAssert() != 0)
{
fatal(CORJIT_SKIPPED);
}
}
}
/*********************************************************************/
bool vlogf(unsigned level, const char* fmt, va_list args)
{
return JitTls::GetLogEnv()->compHnd->logMsg(level, fmt, args);
}
int vflogf(FILE* file, const char* fmt, va_list args)
{
// 0-length string means flush
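    // (callers flush the stream by passing an empty format string, e.g. flogf(file, ""))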
if (fmt[0] == '\0')
{
fflush(file);
return 0;
}
const int BUFF_SIZE = 8192;
char buffer[BUFF_SIZE];
int written = _vsnprintf_s(&buffer[0], BUFF_SIZE, _TRUNCATE, fmt, args);
if (JitConfig.JitDumpToDebugger())
{
OutputDebugStringA(buffer);
}
    // We use fputs here so that this executes as fast as possible
fputs(&buffer[0], file);
return written;
}
int flogf(FILE* file, const char* fmt, ...)
{
va_list args;
va_start(args, fmt);
int written = vflogf(file, fmt, args);
va_end(args);
return written;
}
/*********************************************************************/
int logf(const char* fmt, ...)
{
va_list args;
static bool logToEEfailed = false;
int written = 0;
//
// We remember when the EE failed to log, because vlogf()
// is very slow in a checked build.
//
// If it fails to log an LL_INFO1000 message once
// it will always fail when logging an LL_INFO1000 message.
//
if (!logToEEfailed)
{
va_start(args, fmt);
if (!vlogf(LL_INFO1000, fmt, args))
{
logToEEfailed = true;
}
va_end(args);
}
if (logToEEfailed)
{
// if the EE refuses to log it, we try to send it to stdout
va_start(args, fmt);
written = vflogf(jitstdout, fmt, args);
va_end(args);
}
#if 0 // Enable this only when you need it
else
{
//
// The EE just successfully logged our message
//
static ConfigDWORD fJitBreakOnDumpToken;
DWORD breakOnDumpToken = fJitBreakOnDumpToken.val(CLRConfig::INTERNAL_BreakOnDumpToken);
static DWORD forbidEntry = 0;
if ((breakOnDumpToken != 0xffffffff) && (forbidEntry == 0))
{
forbidEntry = 1;
// Use value of 0 to get the dump
static DWORD currentLine = 1;
if (currentLine == breakOnDumpToken)
{
assert(!"Dump token reached");
}
printf("(Token=0x%x) ", currentLine++);
forbidEntry = 0;
}
}
#endif // 0
va_end(args);
return written;
}
/*********************************************************************/
void gcDump_logf(const char* fmt, ...)
{
va_list args;
static bool logToEEfailed = false;
//
// We remember when the EE failed to log, because vlogf()
// is very slow in a checked build.
//
// If it fails to log an LL_INFO1000 message once
// it will always fail when logging an LL_INFO1000 message.
//
if (!logToEEfailed)
{
va_start(args, fmt);
if (!vlogf(LL_INFO1000, fmt, args))
{
logToEEfailed = true;
}
va_end(args);
}
if (logToEEfailed)
{
// if the EE refuses to log it, we try to send it to stdout
va_start(args, fmt);
vflogf(jitstdout, fmt, args);
va_end(args);
}
#if 0 // Enable this only when you need it
else
{
//
// The EE just successfully logged our message
//
static ConfigDWORD fJitBreakOnDumpToken;
DWORD breakOnDumpToken = fJitBreakOnDumpToken.val(CLRConfig::INTERNAL_BreakOnDumpToken);
static DWORD forbidEntry = 0;
if ((breakOnDumpToken != 0xffffffff) && (forbidEntry == 0))
{
forbidEntry = 1;
// Use value of 0 to get the dump
static DWORD currentLine = 1;
if (currentLine == breakOnDumpToken)
{
assert(!"Dump token reached");
}
printf("(Token=0x%x) ", currentLine++);
forbidEntry = 0;
}
}
#endif // 0
va_end(args);
}
/*********************************************************************/
void logf(unsigned level, const char* fmt, ...)
{
va_list args;
va_start(args, fmt);
vlogf(level, fmt, args);
va_end(args);
}
void DECLSPEC_NORETURN badCode3(const char* msg, const char* msg2, int arg, _In_z_ const char* file, unsigned line)
{
const int BUFF_SIZE = 512;
char buf1[BUFF_SIZE];
char buf2[BUFF_SIZE];
sprintf_s(buf1, BUFF_SIZE, "%s%s", msg, msg2);
sprintf_s(buf2, BUFF_SIZE, buf1, arg);
debugError(buf2, file, line);
badCode();
}
void noWayAssertAbortHelper(const char* cond, const char* file, unsigned line)
{
// Show the assert UI.
if (JitConfig.JitEnableNoWayAssert())
{
assertAbort(cond, file, line);
}
}
void noWayAssertBodyConditional(const char* cond, const char* file, unsigned line)
{
#ifdef FEATURE_TRACELOGGING
if (ShouldThrowOnNoway(file, line))
#else
if (ShouldThrowOnNoway())
#endif
{
noWayAssertBody(cond, file, line);
}
// In CHK we want the assert UI to show up in min-opts.
else
{
noWayAssertAbortHelper(cond, file, line);
}
}
void DECLSPEC_NORETURN noWayAssertBody(const char* cond, const char* file, unsigned line)
{
#if MEASURE_FATAL
fatal_noWayAssertBodyArgs += 1;
#endif // MEASURE_FATAL
noWayAssertAbortHelper(cond, file, line);
noWayAssertBody();
}
#endif // DEBUG
| // Licensed to the .NET Foundation under one or more agreements.
// The .NET Foundation licenses this file to you under the MIT license.
/*XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XX XX
XX error.cpp XX
XX XX
XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
*/
#include "jitpch.h"
#ifdef _MSC_VER
#pragma hdrstop
#endif
#include "compiler.h"
#if MEASURE_FATAL
unsigned fatal_badCode;
unsigned fatal_noWay;
unsigned fatal_implLimitation;
unsigned fatal_NOMEM;
unsigned fatal_noWayAssertBody;
#ifdef DEBUG
unsigned fatal_noWayAssertBodyArgs;
#endif // DEBUG
unsigned fatal_NYI;
#endif // MEASURE_FATAL
/*****************************************************************************/
void DECLSPEC_NORETURN fatal(int errCode)
{
#ifdef DEBUG
if (errCode != CORJIT_SKIPPED) // Don't stop on NYI: use COMPlus_AltJitAssertOnNYI for that.
{
if (JitConfig.DebugBreakOnVerificationFailure())
{
DebugBreak();
}
}
#endif // DEBUG
ULONG_PTR exceptArg = errCode;
RaiseException(FATAL_JIT_EXCEPTION, EXCEPTION_NONCONTINUABLE, 1, &exceptArg);
UNREACHABLE();
}
/*****************************************************************************/
void DECLSPEC_NORETURN badCode()
{
#if MEASURE_FATAL
fatal_badCode += 1;
#endif // MEASURE_FATAL
fatal(CORJIT_BADCODE);
}
/*****************************************************************************/
void DECLSPEC_NORETURN noWay()
{
#if MEASURE_FATAL
fatal_noWay += 1;
#endif // MEASURE_FATAL
fatal(CORJIT_INTERNALERROR);
}
/*****************************************************************************/
void DECLSPEC_NORETURN implLimitation()
{
#if MEASURE_FATAL
fatal_implLimitation += 1;
#endif // MEASURE_FATAL
fatal(CORJIT_IMPLLIMITATION);
}
/*****************************************************************************/
void DECLSPEC_NORETURN NOMEM()
{
#if MEASURE_FATAL
fatal_NOMEM += 1;
#endif // MEASURE_FATAL
fatal(CORJIT_OUTOFMEM);
}
/*****************************************************************************/
void DECLSPEC_NORETURN noWayAssertBody()
{
#if MEASURE_FATAL
fatal_noWayAssertBody += 1;
#endif // MEASURE_FATAL
#ifndef DEBUG
// Even in retail, if we hit a noway, and we have this variable set, we don't want to fall back
// to MinOpts, which might hide a regression. Instead, hit a breakpoint (and crash). We don't
// have the assert code to fall back on here.
// The debug path goes through this function also, to do the call to 'fatal'.
// This kind of noway is hit for unreached().
if (JitConfig.JitEnableNoWayAssert())
{
DebugBreak();
}
#endif // !DEBUG
fatal(CORJIT_RECOVERABLEERROR);
}
inline static bool ShouldThrowOnNoway(
#ifdef FEATURE_TRACELOGGING
const char* filename, unsigned line
#endif
)
{
return JitTls::GetCompiler() == nullptr ||
JitTls::GetCompiler()->compShouldThrowOnNoway(
#ifdef FEATURE_TRACELOGGING
filename, line
#endif
);
}
/*****************************************************************************/
void noWayAssertBodyConditional(
#ifdef FEATURE_TRACELOGGING
const char* filename, unsigned line
#endif
)
{
#ifdef FEATURE_TRACELOGGING
if (ShouldThrowOnNoway(filename, line))
#else
if (ShouldThrowOnNoway())
#endif // FEATURE_TRACELOGGING
{
noWayAssertBody();
}
}
/*****************************************************************************/
void notYetImplemented(const char* msg, const char* filename, unsigned line)
{
Compiler* pCompiler = JitTls::GetCompiler();
if ((pCompiler == nullptr) || (pCompiler->opts.jitFlags->IsSet(JitFlags::JIT_FLAG_ALT_JIT)))
{
NOWAY_MSG_FILE_AND_LINE(msg, filename, line);
return;
}
#if FUNC_INFO_LOGGING
#ifdef DEBUG
LogEnv* env = JitTls::GetLogEnv();
if (env != nullptr)
{
const Compiler* const pCompiler = env->compiler;
if (pCompiler->verbose)
{
printf("\n\n%s - NYI (%s:%d - %s)\n", pCompiler->info.compFullName, filename, line, msg);
}
}
if (Compiler::compJitFuncInfoFile != nullptr)
{
fprintf(Compiler::compJitFuncInfoFile, "%s - NYI (%s:%d - %s)\n",
(env == nullptr) ? "UNKNOWN" : env->compiler->info.compFullName, filename, line, msg);
fflush(Compiler::compJitFuncInfoFile);
}
#else // !DEBUG
if (Compiler::compJitFuncInfoFile != nullptr)
{
fprintf(Compiler::compJitFuncInfoFile, "NYI (%s:%d - %s)\n", filename, line, msg);
fflush(Compiler::compJitFuncInfoFile);
}
#endif // !DEBUG
#endif // FUNC_INFO_LOGGING
#ifdef DEBUG
// Assume we're within a compFunctionTrace boundary, which might not be true.
pCompiler->compFunctionTraceEnd(nullptr, 0, true);
#endif // DEBUG
DWORD value = JitConfig.AltJitAssertOnNYI();
// 0 means just silently skip
// If we are in retail builds, assume ignore
// 1 means popup the assert (abort=abort, retry=debugger, ignore=skip)
// 2 means silently don't skip (same as 3 for retail)
// 3 means popup the assert (abort=abort, retry=debugger, ignore=don't skip)
if (value & 1)
{
#ifdef DEBUG
assertAbort(msg, filename, line);
#endif
}
if ((value & 2) == 0)
{
#if MEASURE_FATAL
fatal_NYI += 1;
#endif // MEASURE_FATAL
fatal(CORJIT_SKIPPED);
}
}
/*****************************************************************************/
LONG __JITfilter(PEXCEPTION_POINTERS pExceptionPointers, LPVOID lpvParam)
{
DWORD exceptCode = pExceptionPointers->ExceptionRecord->ExceptionCode;
if (exceptCode == FATAL_JIT_EXCEPTION)
{
ErrorTrapParam* pParam = (ErrorTrapParam*)lpvParam;
assert(pExceptionPointers->ExceptionRecord->NumberParameters == 1);
pParam->errc = (int)pExceptionPointers->ExceptionRecord->ExceptionInformation[0];
ICorJitInfo* jitInfo = pParam->jitInfo;
if (jitInfo != nullptr)
{
jitInfo->reportFatalError((CorJitResult)pParam->errc);
}
return EXCEPTION_EXECUTE_HANDLER;
}
return EXCEPTION_CONTINUE_SEARCH;
}
/*****************************************************************************/
#ifdef DEBUG
DWORD getBreakOnBadCode()
{
return JitConfig.JitBreakOnBadCode();
}
/*****************************************************************************/
void debugError(const char* msg, const char* file, unsigned line)
{
const char* tail = strrchr(file, '\\');
if (tail != nullptr)
{
tail = tail + 1;
}
else
{
tail = file;
}
LogEnv* env = JitTls::GetLogEnv();
logf(LL_ERROR, "COMPILATION FAILED: file: %s:%d compiling method %s reason %s\n", tail, line,
env->compiler->info.compFullName, msg);
    // We now only assert when the user has explicitly set COMPlus_JitRequired=1.
    // If COMPlus_JitRequired is 0 or not set, we will not assert.
if (JitConfig.JitRequired() == 1 || getBreakOnBadCode())
{
assertAbort(msg, file, line);
}
BreakIfDebuggerPresent();
}
/*****************************************************************************/
LogEnv::LogEnv(ICorJitInfo* aCompHnd) : compHnd(aCompHnd), compiler(nullptr)
{
}
/*****************************************************************************/
extern "C" void __cdecl assertAbort(const char* why, const char* file, unsigned line)
{
const char* msg = why;
LogEnv* env = JitTls::GetLogEnv();
const int BUFF_SIZE = 8192;
char* buff = (char*)_alloca(BUFF_SIZE);
const char* phaseName = "unknown phase";
if (env->compiler)
{
phaseName = PhaseNames[env->compiler->mostRecentlyActivePhase];
_snprintf_s(buff, BUFF_SIZE, _TRUNCATE,
"Assertion failed '%s' in '%s' during '%s' (IL size %d; hash 0x%08x; %s)\n", why,
env->compiler->info.compFullName, phaseName, env->compiler->info.compILCodeSize,
env->compiler->info.compMethodHash(), env->compiler->compGetTieringName(/* short name */ true));
msg = buff;
}
printf(""); // null string means flush
#if FUNC_INFO_LOGGING
if (Compiler::compJitFuncInfoFile != nullptr)
{
fprintf(Compiler::compJitFuncInfoFile, "%s - Assertion failed (%s:%d - %s) during %s\n",
(env == nullptr) ? "UNKNOWN" : env->compiler->info.compFullName, file, line, why, phaseName);
}
#endif // FUNC_INFO_LOGGING
if (env->compHnd->doAssert(file, line, msg))
{
DebugBreak();
}
Compiler* comp = JitTls::GetCompiler();
if (comp != nullptr && comp->opts.jitFlags->IsSet(JitFlags::JIT_FLAG_ALT_JIT))
{
// If we hit an assert, and we got here, it's either because the user hit "ignore" on the
// dialog pop-up, or they set COMPlus_ContinueOnAssert=1 to not emit a pop-up, but just continue.
// If we're an altjit, we have two options: (1) silently continue, as a normal JIT would, probably
// leading to additional asserts, or (2) tell the VM that the AltJit wants to skip this function,
        // thus falling back to the fallback JIT. Setting COMPlus_AltJitSkipOnAssert=1 chooses this
        // "skip to the fallback JIT" behavior. This is useful when doing ASM diffs, where we only want to see
// the first assert for any function, but we don't want to kill the whole ngen process on the
// first assert (which would happen if you used COMPlus_NoGuiOnAssert=1 for example).
if (JitConfig.AltJitSkipOnAssert() != 0)
{
fatal(CORJIT_SKIPPED);
}
}
}
/*********************************************************************/
bool vlogf(unsigned level, const char* fmt, va_list args)
{
return JitTls::GetLogEnv()->compHnd->logMsg(level, fmt, args);
}
int vflogf(FILE* file, const char* fmt, va_list args)
{
// 0-length string means flush
if (fmt[0] == '\0')
{
fflush(file);
return 0;
}
const int BUFF_SIZE = 8192;
char buffer[BUFF_SIZE];
int written = _vsnprintf_s(&buffer[0], BUFF_SIZE, _TRUNCATE, fmt, args);
if (JitConfig.JitDumpToDebugger())
{
OutputDebugStringA(buffer);
}
    // We use fputs here so that this executes as fast as possible
fputs(&buffer[0], file);
return written;
}
int flogf(FILE* file, const char* fmt, ...)
{
va_list args;
va_start(args, fmt);
int written = vflogf(file, fmt, args);
va_end(args);
return written;
}
/*********************************************************************/
int logf(const char* fmt, ...)
{
va_list args;
static bool logToEEfailed = false;
int written = 0;
//
// We remember when the EE failed to log, because vlogf()
// is very slow in a checked build.
//
// If it fails to log an LL_INFO1000 message once
// it will always fail when logging an LL_INFO1000 message.
//
if (!logToEEfailed)
{
va_start(args, fmt);
if (!vlogf(LL_INFO1000, fmt, args))
{
logToEEfailed = true;
}
va_end(args);
}
if (logToEEfailed)
{
// if the EE refuses to log it, we try to send it to stdout
va_start(args, fmt);
written = vflogf(jitstdout, fmt, args);
va_end(args);
}
#if 0 // Enable this only when you need it
else
{
//
// The EE just successfully logged our message
//
static ConfigDWORD fJitBreakOnDumpToken;
DWORD breakOnDumpToken = fJitBreakOnDumpToken.val(CLRConfig::INTERNAL_BreakOnDumpToken);
static DWORD forbidEntry = 0;
if ((breakOnDumpToken != 0xffffffff) && (forbidEntry == 0))
{
forbidEntry = 1;
// Use value of 0 to get the dump
static DWORD currentLine = 1;
if (currentLine == breakOnDumpToken)
{
assert(!"Dump token reached");
}
printf("(Token=0x%x) ", currentLine++);
forbidEntry = 0;
}
}
#endif // 0
va_end(args);
return written;
}
/*********************************************************************/
void gcDump_logf(const char* fmt, ...)
{
va_list args;
static bool logToEEfailed = false;
//
// We remember when the EE failed to log, because vlogf()
// is very slow in a checked build.
//
// If it fails to log an LL_INFO1000 message once
// it will always fail when logging an LL_INFO1000 message.
//
if (!logToEEfailed)
{
va_start(args, fmt);
if (!vlogf(LL_INFO1000, fmt, args))
{
logToEEfailed = true;
}
va_end(args);
}
if (logToEEfailed)
{
// if the EE refuses to log it, we try to send it to stdout
va_start(args, fmt);
vflogf(jitstdout, fmt, args);
va_end(args);
}
#if 0 // Enable this only when you need it
else
{
//
// The EE just successfully logged our message
//
static ConfigDWORD fJitBreakOnDumpToken;
DWORD breakOnDumpToken = fJitBreakOnDumpToken.val(CLRConfig::INTERNAL_BreakOnDumpToken);
static DWORD forbidEntry = 0;
if ((breakOnDumpToken != 0xffffffff) && (forbidEntry == 0))
{
forbidEntry = 1;
// Use value of 0 to get the dump
static DWORD currentLine = 1;
if (currentLine == breakOnDumpToken)
{
assert(!"Dump token reached");
}
printf("(Token=0x%x) ", currentLine++);
forbidEntry = 0;
}
}
#endif // 0
va_end(args);
}
/*********************************************************************/
void logf(unsigned level, const char* fmt, ...)
{
va_list args;
va_start(args, fmt);
vlogf(level, fmt, args);
va_end(args);
}
void DECLSPEC_NORETURN badCode3(const char* msg, const char* msg2, int arg, _In_z_ const char* file, unsigned line)
{
const int BUFF_SIZE = 512;
char buf1[BUFF_SIZE];
char buf2[BUFF_SIZE];
sprintf_s(buf1, BUFF_SIZE, "%s%s", msg, msg2);
sprintf_s(buf2, BUFF_SIZE, buf1, arg);
debugError(buf2, file, line);
badCode();
}
void noWayAssertAbortHelper(const char* cond, const char* file, unsigned line)
{
// Show the assert UI.
if (JitConfig.JitEnableNoWayAssert())
{
assertAbort(cond, file, line);
}
}
void noWayAssertBodyConditional(const char* cond, const char* file, unsigned line)
{
#ifdef FEATURE_TRACELOGGING
if (ShouldThrowOnNoway(file, line))
#else
if (ShouldThrowOnNoway())
#endif
{
noWayAssertBody(cond, file, line);
}
// In CHK we want the assert UI to show up in min-opts.
else
{
noWayAssertAbortHelper(cond, file, line);
}
}
void DECLSPEC_NORETURN noWayAssertBody(const char* cond, const char* file, unsigned line)
{
#if MEASURE_FATAL
fatal_noWayAssertBodyArgs += 1;
#endif // MEASURE_FATAL
noWayAssertAbortHelper(cond, file, line);
noWayAssertBody();
}
#endif // DEBUG
| -1 |
dotnet/runtime | 66,245 | JIT: Optimize movzx after setcc | Clear target reg via `xor reg, reg` instead of relying on zero-extend after SETCC which is heavier and slower
```csharp
bool Test(int x) => x == 42;
```
```diff
; Method Test(int):bool:this
G_M55728_IG01: ;; offset=0000H
G_M55728_IG02: ;; offset=0000H
+ 33C0 xor eax, eax
83FA2A cmp edx, 42
0F94C0 sete al
- 0FB6C0 movzx rax, al
G_M55728_IG03: ;; offset=0009H
C3 ret
-; Total bytes of code: 10
+; Total bytes of code: 9
```
Nice diffs:
```
benchmarks.run.Linux.x64.checked.mch:
Total bytes of delta: -5230 (-0.03 % of base)
coreclr_tests.pmi.Linux.x64.checked.mch:
Total bytes of delta: -210860 (-0.16 % of base)
libraries.crossgen2.Linux.x64.checked.mch:
Total bytes of delta: -4420 (-0.06 % of base)
libraries.pmi.Linux.x64.checked.mch:
Total bytes of delta: -25312 (-0.05 % of base)
libraries_tests.pmi.Linux.x64.checked.mch:
Total bytes of delta: -49314 (-0.04 % of base)
``` | EgorBo | 2022-03-05T17:18:25Z | 2022-03-07T23:22:14Z | 440dfe4a7beecd7755767aa247f47af00b119383 | 5635905f134a3329a15112bd4975acef3f661eb2 | JIT: Optimize movzx after setcc. Clear target reg via `xor reg, reg` instead of relying on zero-extend after SETCC which is heavier and slower
```csharp
bool Test(int x) => x == 42;
```
```diff
; Method Test(int):bool:this
G_M55728_IG01: ;; offset=0000H
G_M55728_IG02: ;; offset=0000H
+ 33C0 xor eax, eax
83FA2A cmp edx, 42
0F94C0 sete al
- 0FB6C0 movzx rax, al
G_M55728_IG03: ;; offset=0009H
C3 ret
-; Total bytes of code: 10
+; Total bytes of code: 9
```
Nice diffs:
```
benchmarks.run.Linux.x64.checked.mch:
Total bytes of delta: -5230 (-0.03 % of base)
coreclr_tests.pmi.Linux.x64.checked.mch:
Total bytes of delta: -210860 (-0.16 % of base)
libraries.crossgen2.Linux.x64.checked.mch:
Total bytes of delta: -4420 (-0.06 % of base)
libraries.pmi.Linux.x64.checked.mch:
Total bytes of delta: -25312 (-0.05 % of base)
libraries_tests.pmi.Linux.x64.checked.mch:
Total bytes of delta: -49314 (-0.04 % of base)
``` | ./src/coreclr/tools/superpmi/superpmi/jitinstance.h | // Licensed to the .NET Foundation under one or more agreements.
// The .NET Foundation licenses this file to you under the MIT license.
#ifndef _JitInstance
#define _JitInstance
#include "superpmi.h"
#include "simpletimer.h"
#include "methodcontext.h"
#include "cycletimer.h"
class JitInstance
{
private:
char* PathToOriginalJit;
char* PathToTempJit;
HMODULE hLib;
PgetJit pngetJit;
PjitStartup pnjitStartup;
ICorJitHost* jitHost;
ICorJitInfo* icji;
SimpleTimer stj;
LightWeightMap<DWORD, DWORD>* forceOptions;
LightWeightMap<DWORD, DWORD>* options;
MethodContext::Environment environment;
JitInstance(){};
void timeResult(CORINFO_METHOD_INFO info, unsigned flags);
public:
bool forceClearAltJitFlag;
bool forceSetAltJitFlag;
enum Result
{
RESULT_ERROR,
RESULT_SUCCESS,
RESULT_MISSING
};
CycleTimer lt;
MethodContext* mc;
ULONGLONG times[2];
ICorJitCompiler* pJitInstance;
    // Allocate and initialize the provided JIT
static JitInstance* InitJit(char* nameOfJit,
bool breakOnAssert,
SimpleTimer* st1,
MethodContext* firstContext,
LightWeightMap<DWORD, DWORD>* forceOptions,
LightWeightMap<DWORD, DWORD>* options);
HRESULT StartUp(char* PathToJit, bool copyJit, bool breakOnDebugBreakorAV, MethodContext* firstContext);
bool reLoad(MethodContext* firstContext);
bool callJitStartup(ICorJitHost* newHost);
bool resetConfig(MethodContext* firstContext);
Result CompileMethod(MethodContext* MethodToCompile, int mcIndex, bool collectThroughput, class MetricsSummary* summary);
const WCHAR* getForceOption(const WCHAR* key);
const WCHAR* getOption(const WCHAR* key);
const WCHAR* getOption(const WCHAR* key, LightWeightMap<DWORD, DWORD>* options);
const MethodContext::Environment& getEnvironment();
void* allocateArray(size_t size);
void* allocateLongLivedArray(size_t size);
void freeArray(void* array);
void freeLongLivedArray(void* array);
};
#endif
| // Licensed to the .NET Foundation under one or more agreements.
// The .NET Foundation licenses this file to you under the MIT license.
#ifndef _JitInstance
#define _JitInstance
#include "superpmi.h"
#include "simpletimer.h"
#include "methodcontext.h"
#include "cycletimer.h"
class JitInstance
{
private:
char* PathToOriginalJit;
char* PathToTempJit;
HMODULE hLib;
PgetJit pngetJit;
PjitStartup pnjitStartup;
ICorJitHost* jitHost;
ICorJitInfo* icji;
SimpleTimer stj;
LightWeightMap<DWORD, DWORD>* forceOptions;
LightWeightMap<DWORD, DWORD>* options;
MethodContext::Environment environment;
JitInstance(){};
void timeResult(CORINFO_METHOD_INFO info, unsigned flags);
public:
bool forceClearAltJitFlag;
bool forceSetAltJitFlag;
enum Result
{
RESULT_ERROR,
RESULT_SUCCESS,
RESULT_MISSING
};
CycleTimer lt;
MethodContext* mc;
ULONGLONG times[2];
ICorJitCompiler* pJitInstance;
    // Allocate and initialize the provided JIT
static JitInstance* InitJit(char* nameOfJit,
bool breakOnAssert,
SimpleTimer* st1,
MethodContext* firstContext,
LightWeightMap<DWORD, DWORD>* forceOptions,
LightWeightMap<DWORD, DWORD>* options);
HRESULT StartUp(char* PathToJit, bool copyJit, bool breakOnDebugBreakorAV, MethodContext* firstContext);
bool reLoad(MethodContext* firstContext);
bool callJitStartup(ICorJitHost* newHost);
bool resetConfig(MethodContext* firstContext);
Result CompileMethod(MethodContext* MethodToCompile, int mcIndex, bool collectThroughput, class MetricsSummary* summary);
const WCHAR* getForceOption(const WCHAR* key);
const WCHAR* getOption(const WCHAR* key);
const WCHAR* getOption(const WCHAR* key, LightWeightMap<DWORD, DWORD>* options);
const MethodContext::Environment& getEnvironment();
void* allocateArray(size_t size);
void* allocateLongLivedArray(size_t size);
void freeArray(void* array);
void freeLongLivedArray(void* array);
};
#endif
| -1 |
dotnet/runtime | 66245 | JIT: Optimize movzx after setcc | Clear target reg via `xor reg, reg` instead of relying on zero-extend after SETCC which is heavier and slower
```csharp
bool Test(int x) => x == 42;
```
```diff
; Method Test(int):bool:this
G_M55728_IG01: ;; offset=0000H
G_M55728_IG02: ;; offset=0000H
+ 33C0 xor eax, eax
83FA2A cmp edx, 42
0F94C0 sete al
- 0FB6C0 movzx rax, al
G_M55728_IG03: ;; offset=0009H
C3 ret
-; Total bytes of code: 10
+; Total bytes of code: 9
```
Nice diffs:
```
benchmarks.run.Linux.x64.checked.mch:
Total bytes of delta: -5230 (-0.03 % of base)
coreclr_tests.pmi.Linux.x64.checked.mch:
Total bytes of delta: -210860 (-0.16 % of base)
libraries.crossgen2.Linux.x64.checked.mch:
Total bytes of delta: -4420 (-0.06 % of base)
libraries.pmi.Linux.x64.checked.mch:
Total bytes of delta: -25312 (-0.05 % of base)
libraries_tests.pmi.Linux.x64.checked.mch:
Total bytes of delta: -49314 (-0.04 % of base)
``` | EgorBo | 2022-03-05T17:18:25Z | 2022-03-07T23:22:14Z | 440dfe4a7beecd7755767aa247f47af00b119383 | 5635905f134a3329a15112bd4975acef3f661eb2 | JIT: Optimize movzx after setcc. Clear target reg via `xor reg, reg` instead of relying on zero-extend after SETCC which is heavier and slower
```csharp
bool Test(int x) => x == 42;
```
```diff
; Method Test(int):bool:this
G_M55728_IG01: ;; offset=0000H
G_M55728_IG02: ;; offset=0000H
+ 33C0 xor eax, eax
83FA2A cmp edx, 42
0F94C0 sete al
- 0FB6C0 movzx rax, al
G_M55728_IG03: ;; offset=0009H
C3 ret
-; Total bytes of code: 10
+; Total bytes of code: 9
```
Nice diffs:
```
benchmarks.run.Linux.x64.checked.mch:
Total bytes of delta: -5230 (-0.03 % of base)
coreclr_tests.pmi.Linux.x64.checked.mch:
Total bytes of delta: -210860 (-0.16 % of base)
libraries.crossgen2.Linux.x64.checked.mch:
Total bytes of delta: -4420 (-0.06 % of base)
libraries.pmi.Linux.x64.checked.mch:
Total bytes of delta: -25312 (-0.05 % of base)
libraries_tests.pmi.Linux.x64.checked.mch:
Total bytes of delta: -49314 (-0.04 % of base)
``` | ./src/coreclr/debug/di/shimpriv.h | // Licensed to the .NET Foundation under one or more agreements.
// The .NET Foundation licenses this file to you under the MIT license.
//*****************************************************************************
// shimprivate.h
//
//
// private header for RS shim which bridges from V2 to V3.
//*****************************************************************************
#ifndef SHIMPRIV_H
#define SHIMPRIV_H
#include "helpers.h"
#include "shimdatatarget.h"
#include <shash.h>
// Forward declarations
class CordbWin32EventThread;
class Cordb;
class ShimStackWalk;
class ShimChain;
class ShimChainEnum;
class ShimFrameEnum;
// This struct specifies that it's a hash table of ShimStackWalk * using ICorDebugThread as the key.
struct ShimStackWalkHashTableTraits : public PtrSHashTraits<ShimStackWalk, ICorDebugThread *> {};
typedef SHash<ShimStackWalkHashTableTraits> ShimStackWalkHashTable;
//---------------------------------------------------------------------------------------
//
// Simple struct for storing a void *. This is to be used with a SHash hash table.
//
struct DuplicateCreationEventEntry
{
public:
DuplicateCreationEventEntry(void * pKey) : m_pKey(pKey) {};
// These functions must be defined for DuplicateCreationEventsHashTableTraits.
void * GetKey() {return m_pKey;};
static UINT32 Hash(void * pKey) {return (UINT32)(size_t)pKey;};
private:
void * m_pKey;
};
// This struct specifies that it's a hash table of DuplicateCreationEventEntry * using a void * as the key.
// The void * is expected to be an ICDProcess/ICDAppDomain/ICDThread/ICDAssembly/ICDThread interface pointer.
struct DuplicateCreationEventsHashTableTraits : public PtrSHashTraits<DuplicateCreationEventEntry, void *> {};
typedef SHash<DuplicateCreationEventsHashTableTraits> DuplicateCreationEventsHashTable;
//
// Callback that shim provides, which then queues up the events.
//
class ShimProxyCallback :
public ICorDebugManagedCallback,
public ICorDebugManagedCallback2,
public ICorDebugManagedCallback3,
public ICorDebugManagedCallback4
{
ShimProcess * m_pShim; // weak reference
LONG m_cRef;
public:
ShimProxyCallback(ShimProcess * pShim);
virtual ~ShimProxyCallback() {}
// Implement IUnknown
ULONG STDMETHODCALLTYPE AddRef();
ULONG STDMETHODCALLTYPE Release();
COM_METHOD QueryInterface(REFIID riid, void **ppInterface);
//
// Implementation of ICorDebugManagedCallback
//
COM_METHOD Breakpoint( ICorDebugAppDomain *pAppDomain,
ICorDebugThread *pThread,
ICorDebugBreakpoint *pBreakpoint);
COM_METHOD StepComplete( ICorDebugAppDomain *pAppDomain,
ICorDebugThread *pThread,
ICorDebugStepper *pStepper,
CorDebugStepReason reason);
COM_METHOD Break( ICorDebugAppDomain *pAppDomain,
ICorDebugThread *thread);
COM_METHOD Exception( ICorDebugAppDomain *pAppDomain,
ICorDebugThread *pThread,
BOOL unhandled);
COM_METHOD EvalComplete( ICorDebugAppDomain *pAppDomain,
ICorDebugThread *pThread,
ICorDebugEval *pEval);
COM_METHOD EvalException( ICorDebugAppDomain *pAppDomain,
ICorDebugThread *pThread,
ICorDebugEval *pEval);
COM_METHOD CreateProcess( ICorDebugProcess *pProcess);
void QueueCreateProcess( ICorDebugProcess *pProcess);
COM_METHOD ExitProcess( ICorDebugProcess *pProcess);
COM_METHOD CreateThread( ICorDebugAppDomain *pAppDomain, ICorDebugThread *thread);
COM_METHOD ExitThread( ICorDebugAppDomain *pAppDomain, ICorDebugThread *thread);
COM_METHOD LoadModule( ICorDebugAppDomain *pAppDomain, ICorDebugModule *pModule);
void FakeLoadModule(ICorDebugAppDomain *pAppDomain, ICorDebugModule *pModule);
COM_METHOD UnloadModule( ICorDebugAppDomain *pAppDomain, ICorDebugModule *pModule);
COM_METHOD LoadClass( ICorDebugAppDomain *pAppDomain, ICorDebugClass *c);
COM_METHOD UnloadClass( ICorDebugAppDomain *pAppDomain, ICorDebugClass *c);
COM_METHOD DebuggerError( ICorDebugProcess *pProcess, HRESULT errorHR, DWORD errorCode);
COM_METHOD LogMessage( ICorDebugAppDomain *pAppDomain,
ICorDebugThread *pThread,
LONG lLevel,
_In_ LPWSTR pLogSwitchName,
_In_ LPWSTR pMessage);
COM_METHOD LogSwitch( ICorDebugAppDomain *pAppDomain,
ICorDebugThread *pThread,
LONG lLevel,
ULONG ulReason,
_In_ LPWSTR pLogSwitchName,
_In_ LPWSTR pParentName);
COM_METHOD CreateAppDomain(ICorDebugProcess *pProcess,
ICorDebugAppDomain *pAppDomain);
COM_METHOD ExitAppDomain(ICorDebugProcess *pProcess,
ICorDebugAppDomain *pAppDomain);
COM_METHOD LoadAssembly(ICorDebugAppDomain *pAppDomain,
ICorDebugAssembly *pAssembly);
COM_METHOD UnloadAssembly(ICorDebugAppDomain *pAppDomain,
ICorDebugAssembly *pAssembly);
COM_METHOD ControlCTrap(ICorDebugProcess *pProcess);
COM_METHOD NameChange(ICorDebugAppDomain *pAppDomain, ICorDebugThread *pThread);
COM_METHOD UpdateModuleSymbols( ICorDebugAppDomain *pAppDomain,
ICorDebugModule *pModule,
IStream *pSymbolStream);
COM_METHOD EditAndContinueRemap( ICorDebugAppDomain *pAppDomain,
ICorDebugThread *pThread,
ICorDebugFunction *pFunction,
BOOL fAccurate);
COM_METHOD BreakpointSetError( ICorDebugAppDomain *pAppDomain,
ICorDebugThread *pThread,
ICorDebugBreakpoint *pBreakpoint,
DWORD dwError);
///
/// Implementation of ICorDebugManagedCallback2
///
COM_METHOD FunctionRemapOpportunity( ICorDebugAppDomain *pAppDomain,
ICorDebugThread *pThread,
ICorDebugFunction *pOldFunction,
ICorDebugFunction *pNewFunction,
ULONG32 oldILOffset);
COM_METHOD CreateConnection(ICorDebugProcess *pProcess, CONNID dwConnectionId, _In_ LPWSTR pConnName);
COM_METHOD ChangeConnection(ICorDebugProcess *pProcess, CONNID dwConnectionId );
COM_METHOD DestroyConnection(ICorDebugProcess *pProcess, CONNID dwConnectionId);
COM_METHOD Exception(ICorDebugAppDomain *pAppDomain,
ICorDebugThread *pThread,
ICorDebugFrame *pFrame,
ULONG32 nOffset,
CorDebugExceptionCallbackType dwEventType,
DWORD dwFlags );
COM_METHOD ExceptionUnwind(ICorDebugAppDomain *pAppDomain,
ICorDebugThread *pThread,
CorDebugExceptionUnwindCallbackType dwEventType,
DWORD dwFlags);
COM_METHOD FunctionRemapComplete( ICorDebugAppDomain *pAppDomain,
ICorDebugThread *pThread,
ICorDebugFunction *pFunction);
COM_METHOD MDANotification(ICorDebugController * pController, ICorDebugThread *pThread, ICorDebugMDA * pMDA);
///
/// Implementation of ICorDebugManagedCallback3
///
// Implementation of ICorDebugManagedCallback3::CustomNotification
COM_METHOD CustomNotification(ICorDebugThread * pThread, ICorDebugAppDomain * pAppDomain);
///
/// Implementation of ICorDebugManagedCallback4
///
// Implementation of ICorDebugManagedCallback4::BeforeGarbageCollection
COM_METHOD BeforeGarbageCollection(ICorDebugProcess* pProcess);
// Implementation of ICorDebugManagedCallback4::AfterGarbageCollection
COM_METHOD AfterGarbageCollection(ICorDebugProcess* pProcess);
// Implementation of ICorDebugManagedCallback4::DataBreakpoint
COM_METHOD DataBreakpoint(ICorDebugProcess* pProcess, ICorDebugThread* pThread, BYTE* pContext, ULONG32 contextSize);
};
//
// Base class for event queue. These are nested into a singly linked list.
// Shim maintains event queue
//
class ManagedEvent
{
public:
// Need virtual dtor since this is a base class.
virtual ~ManagedEvent();
#ifdef _DEBUG
// For debugging, get a pointer value that can identify the type of this event.
void * GetDebugCookie();
#endif
// We'll have a lot of derived classes of ManagedEvent, and so encapsulating the arguments
// for the Dispatch() function lets us juggle them around easily without hitting every signature.
class DispatchArgs
{
public:
DispatchArgs(ICorDebugManagedCallback * pCallback1, ICorDebugManagedCallback2 * pCallback2, ICorDebugManagedCallback3 * pCallback3, ICorDebugManagedCallback4 * pCallback4);
ICorDebugManagedCallback * GetCallback1();
ICorDebugManagedCallback2 * GetCallback2();
ICorDebugManagedCallback3 * GetCallback3();
ICorDebugManagedCallback4 * GetCallback4();
protected:
ICorDebugManagedCallback * m_pCallback1;
ICorDebugManagedCallback2 * m_pCallback2;
ICorDebugManagedCallback3 * m_pCallback3;
ICorDebugManagedCallback4 * m_pCallback4;
};
// Returns: value of callback from end-user
virtual HRESULT Dispatch(DispatchArgs args) = 0;
// Returns 0 if none.
DWORD GetOSTid();
protected:
// Ctor for events with thread-affinity
ManagedEvent(ICorDebugThread * pThread);
// Ctor for events without thread affinity.
ManagedEvent();
friend class ManagedEventQueue;
ManagedEvent * m_pNext;
DWORD m_dwThreadId;
};
//
// Queue of managed events.
// Shim can use this to collect managed debug events, queue them, and then drain the event
// queue when a sync-complete occurs.
// Event queue gets initialized with a lock and will lock internally.
class ManagedEventQueue
{
public:
ManagedEventQueue();
void Init(RSLock * pLock);
// Remove event from the top. Caller then takes ownership of Event and will call Delete on it.
// Caller checks IsEmpty() first.
ManagedEvent * Dequeue();
// Queue owns the event and will delete it (unless it's dequeued first).
void QueueEvent(ManagedEvent * pEvent);
// Test if event queue is empty
bool IsEmpty();
// Empty event queue and delete all objects
void DeleteAll();
// Nothrows
BOOL HasQueuedCallbacks(ICorDebugThread * pThread);
// Save the current queue and start with a new empty queue
void SuspendQueue();
// Restore the saved queue onto the end of the current queue
void RestoreSuspendedQueue();
protected:
// The lock to be used for synchronizing all access to the queue
RSLock * m_pLock;
// If empty, First + Last are both NULL.
// Else first points to the head of the queue; and Last points to the end of the queue.
ManagedEvent * m_pFirstEvent;
ManagedEvent * m_pLastEvent;
};
//---------------------------------------------------------------------------------------
//
// Shim's layer on top of a process.
//
// Notes:
// This contains a V3 ICorDebugProcess, and provides V2 ICDProcess functionality.
//
class ShimProcess
{
// Delete via Ref count semantics.
~ShimProcess();
public:
// Initialize ref count is 0.
ShimProcess();
// Lifetime semantics handled by reference counting.
void AddRef();
void Release();
// Release all resources. Can be called multiple times.
void Dispose();
// Initialization phases.
// 1. allocate new ShimProcess(). This lets us spin up a Win32 EventThread, which can then
// be used to
// 2. Call ShimProcess::CreateProcess/DebugActiveProcess. This will call CreateAndStartWin32ET to
    // create the w32et.
// 3. Create OS-debugging pipeline. This establishes the physical OS process and gets us a pid/handle
// 4. pShim->InitializeDataTarget - this creates a reader/writer abstraction around the OS process.
// 5. pShim->SetProcess() - this connects the Shim to the ICDProcess object.
HRESULT InitializeDataTarget(const ProcessDescriptor * pProcessDescriptor);
void SetProcess(ICorDebugProcess * pProcess);
//-----------------------------------------------------------
// Creation
//-----------------------------------------------------------
static HRESULT CreateProcess(
Cordb * pCordb,
ICorDebugRemoteTarget * pRemoteTarget,
LPCWSTR programName,
_In_z_ LPWSTR programArgs,
LPSECURITY_ATTRIBUTES lpProcessAttributes,
LPSECURITY_ATTRIBUTES lpThreadAttributes,
BOOL bInheritHandles,
DWORD dwCreationFlags,
PVOID lpEnvironment,
LPCWSTR lpCurrentDirectory,
LPSTARTUPINFOW lpStartupInfo,
LPPROCESS_INFORMATION lpProcessInformation,
CorDebugCreateProcessFlags corDebugFlags
);
static HRESULT DebugActiveProcess(
Cordb * pCordb,
ICorDebugRemoteTarget * pRemoteTarget,
const ProcessDescriptor * pProcessDescriptor,
BOOL win32Attach
);
// Locates the DAC module adjacent to DBI
static HMODULE GetDacModule();
//
// Functions used by CordbProcess
//
// Determine if the calling thread is the win32 event thread.
bool IsWin32EventThread();
// Expose the W32ET thread to the CordbProcess so that it can emulate V2 behavior
CordbWin32EventThread * GetWin32EventThread();
// Accessor wrapper to mark whether we're interop-debugging.
void SetIsInteropDebugging(bool fIsInteropDebugging);
// Handle a debug event.
HRESULT HandleWin32DebugEvent(const DEBUG_EVENT * pEvent);
ManagedEventQueue * GetManagedEventQueue();
ManagedEvent * DequeueManagedEvent();
ShimProxyCallback * GetShimCallback();
    // Begin queuing the fake attach events.
void BeginQueueFakeAttachEvents();
// Queue fake attach events if needed
void QueueFakeAttachEventsIfNeeded(bool fRealCreateProcessEvent);
// Actually do the work to queue the fake attach events.
void QueueFakeAttachEvents();
    // Helper to queue fake assembly and module events
void QueueFakeAssemblyAndModuleEvent(ICorDebugAssembly * pAssembly);
// Queue fake thread-create events on attach. No ordering.
HRESULT QueueFakeThreadAttachEventsNoOrder();
bool IsThreadSuspendedOrHijacked(ICorDebugThread * pThread);
// Expose m_attached to CordbProcess.
bool GetAttached();
// We need to know whether we are in the CreateProcess callback to be able to
// return the v2.0 hresults from code:CordbProcess::SetDesiredNGENCompilerFlags
// when we are using the shim.
//
// Expose m_fInCreateProcess
bool GetInCreateProcess();
void SetInCreateProcess(bool value);
// We need to know whether we are in the FakeLoadModule callback to be able to
// return the v2.0 hresults from code:CordbModule::SetJITCompilerFlags when
// we are using the shim.
//
// Expose m_fInLoadModule
bool GetInLoadModule();
void SetInLoadModule(bool value);
// When we get a continue, we need to clear the flags indicating we're still in a callback
void NotifyOnContinue ();
// The RS calls this function when the stack is about to be changed in any way, e.g. continue, SetIP,
// etc.
void NotifyOnStackInvalidate();
// Helpers to filter HRs to emulate V2 error codes.
HRESULT FilterSetNgenHresult(HRESULT hr);
HRESULT FilterSetJitFlagsHresult(HRESULT hr);
//.............................................................
    // Lookup or create a ShimStackWalk for the specified thread. ShimStackWalk and ICorDebugThread have
// a 1:1 relationship.
ShimStackWalk * LookupOrCreateShimStackWalk(ICorDebugThread * pThread);
// Clear all ShimStackWalks and flush all the caches.
void ClearAllShimStackWalk();
// Get the corresponding ICDProcess object.
ICorDebugProcess * GetProcess();
// Get the data target to access the debuggee.
ICorDebugMutableDataTarget * GetDataTarget();
// Get the native event pipeline
INativeEventPipeline * GetNativePipeline();
// Are we interop-debugging?
bool IsInteropDebugging();
// Finish all the necessary initialization work and queue up any necessary fake attach events before
// dispatching an event.
void PreDispatchEvent(bool fRealCreateProcessEvent = false);
// Retrieve the IP address and the port number of the debugger proxy.
MachineInfo GetMachineInfo();
// Add an entry in the duplicate creation event hash table for the specified key.
void AddDuplicateCreationEvent(void * pKey);
// Check if a duplicate creation event entry exists for the specified key. If so, remove it.
bool RemoveDuplicateCreationEventIfPresent(void * pKey);
void SetMarkAttachPendingEvent();
void SetTerminatingEvent();
RSLock * GetShimLock();
protected:
// Reference count.
LONG m_ref;
//
// Helper functions
//
HRESULT CreateAndStartWin32ET(Cordb * pCordb);
//
// Synchronization events to ensure that AttachPending bit is marked before DebugActiveProcess
// returns or debugger is detaching
//
HANDLE m_markAttachPendingEvent;
HANDLE m_terminatingEvent;
//
// Event Queues
//
// Shim maintains event queue to emulate V2 semantics.
    // In V2, ICorDebug internally queued debug events and dispatched them
// once the debuggee was synchronized. In V3, ICorDebug dispatches events immediately.
// The event queue is moved into the shim to build V2 semantics of V3 behavior.
ManagedEventQueue m_eventQueue;
// Lock to protect Shim data structures. This is currently a small lock that
// protects leaf-level structures, but it may grow to protect larger things.
RSLock m_ShimLock;
    // Serializes ShimProcess::Dispose() with other ShimProcess functions. For now, this
// cannot be the same as m_ShimLock. See LL_SHIM_PROCESS_DISPOSE_LOCK for more
// information
RSLock m_ShimProcessDisposeLock;
// Sticky bit to do lazy-initialization on the first managed event.
bool m_fFirstManagedEvent;
RSExtSmartPtr<ShimProxyCallback> m_pShimCallback;
    // This is for emulating V2 Attach. Initialized to false, and then set to true if we need to send fake attach events.
// Reset to false once the events are sent. See code:ShimProcess::QueueFakeAttachEventsIfNeeded
bool m_fNeedFakeAttachEvents;
// True if the process was created from an attach (DebugActiveProcess); False if it was launched (CreateProcess)
// This is used to send an Attach IPC event, and also used to provide more specific error codes.
bool m_attached;
// True iff we are in the shim's CreateProcess callback. This is used to determine which hresult to
// return from code:CordbProcess::SetDesiredNGENCompilerFlags so we correctly emulate the behavior of v2.0.
// This is set at the beginning of the callback and cleared in code:CordbProcess::ContinueInternal.
bool m_fInCreateProcess;
// True iff we are in the shim's FakeLoadModule callback. This is used to determine which hresult to
// return from code:CordbModule::SetJITCompilerFlags so we correctly emulate the behavior of v2.0.
// This is set at the beginning of the callback and cleared in code:CordbProcess::ContinueInternal.
bool m_fInLoadModule;
//
// Data
//
// Pointer to CordbProcess.
// @dbgtodo shim: We'd like this to eventually go through public interfaces (ICorDebugProcess)
IProcessShimHooks * m_pProcess; // Reference is kept by m_pIProcess;
RSExtSmartPtr<ICorDebugProcess> m_pIProcess;
    // Win32EventThread, which is the thread that uses the native debug API.
CordbWin32EventThread * m_pWin32EventThread;
// Actual data-target. Since we're shimming V2 scenarios, and V3 is always
// live-debugging, this is always a live data-target.
RSExtSmartPtr<ShimDataTarget> m_pLiveDataTarget;
// If true, the shim is emulating interop-debugging
// If false, the shim is emulating managed-only debugging.
// Both managed and native debugging have the same underlying pipeline (built
// on native-debug events). So the only difference is how they handle those events.
bool m_fIsInteropDebugging;
// true iff Dispose() was called. Consult this and do your work under m_ShimProcessDisposeLock
// to serialize yourself against a call to Dispose(). This protects your work
// from the user doing a Debugger Detach in the middle.
bool m_fIsDisposed;
//.............................................................................
//
// Members used for handling native events when managed-only debugging.
//
//.............................................................................
// Default handler for native events when managed-only debugging.
void DefaultEventHandler(const DEBUG_EVENT * pEvent, DWORD * pdwContinueStatus);
// Given a debug event, track the file handles.
void TrackFileHandleForDebugEvent(const DEBUG_EVENT * pEvent);
// Have we gotten the loader breakpoint yet?
// A Debugger needs to do special work to skip the loader breakpoint,
// and that's also when it should dispatch the faked managed attach events.
bool m_loaderBPReceived;
// Raw callback for ContinueStatusChanged from Data-target.
static HRESULT ContinueStatusChanged(void * pUserData, DWORD dwThreadId, CORDB_CONTINUE_STATUS dwContinueStatus);
// Real worker to update ContinueStatusChangedData
HRESULT ContinueStatusChangedWorker(DWORD dwThreadId, CORDB_CONTINUE_STATUS dwContinueStatus);
struct ContinueStatusChangedData
{
void Clear();
bool IsSet();
// Tid of Thread changed
DWORD m_dwThreadId;
// New continue status.
CORDB_CONTINUE_STATUS m_status;
} m_ContinueStatusChangedData;
// the hash table of ShimStackWalks
ShimStackWalkHashTable * m_pShimStackWalkHashTable;
// the hash table of duplicate creation events
DuplicateCreationEventsHashTable * m_pDupeEventsHashTable;
MachineInfo m_machineInfo;
};
//---------------------------------------------------------------------------------------
//
// This is the container class of ShimChains, ICorDebugFrames, ShimChainEnums, and ShimFrameEnums.
// It has a 1:1 relationship with ICorDebugThreads. Upon creation, this class walks the entire stack and
// caches all the stack frames and chains. The enumerators are created on demand.
//
class ShimStackWalk
{
public:
ShimStackWalk(ShimProcess * pProcess, ICorDebugThread * pThread);
~ShimStackWalk();
// These functions do not adjust the reference count.
ICorDebugThread * GetThread();
ShimChain * GetChain(UINT32 index);
ICorDebugFrame * GetFrame(UINT32 index);
// Get the number of frames and chains.
ULONG GetChainCount();
ULONG GetFrameCount();
RSLock * GetShimLock();
// Add ICDChainEnum and ICDFrameEnum.
void AddChainEnum(ShimChainEnum * pChainEnum);
void AddFrameEnum(ShimFrameEnum * pFrameEnum);
// The next two functions are for ShimStackWalkHashTableTraits.
ICorDebugThread * GetKey();
static UINT32 Hash(ICorDebugThread * pThread);
// Check if the specified frame is the leaf frame according to the V2 definition.
BOOL IsLeafFrame(ICorDebugFrame * pFrame);
// Check if the two specified frames are the same. This function checks the SPs, frame address, etc.
// instead of just checking for pointer equality.
BOOL IsSameFrame(ICorDebugFrame * pLeft, ICorDebugFrame * pRight);
// The following functions are entry point into the ShimStackWalk. They are called by the RS.
void EnumerateChains(ICorDebugChainEnum ** ppChainEnum);
void GetActiveChain(ICorDebugChain ** ppChain);
void GetActiveFrame(ICorDebugFrame ** ppFrame);
void GetActiveRegisterSet(ICorDebugRegisterSet ** ppRegisterSet);
void GetChainForFrame(ICorDebugFrame * pFrame, ICorDebugChain ** ppChain);
void GetCallerForFrame(ICorDebugFrame * pFrame, ICorDebugFrame ** ppCallerFrame);
void GetCalleeForFrame(ICorDebugFrame * pFrame, ICorDebugFrame ** ppCalleeFrame);
private:
//---------------------------------------------------------------------------------------
//
// This is a helper class used to store the information of a chain during a stackwalk. A chain is marked
// by the CONTEXT on the leaf boundary and a FramePointer on the root boundary. Also, notice that we
// are keeping two CONTEXTs. This is because some chain types may cancel a previous unmanaged chain.
// For example, a CHAIN_FUNC_EVAL chain cancels any CHAIN_ENTER_UNMANAGED chain immediately preceding
// it. In this case, the leaf boundary of the CHAIN_FUNC_EVAL chain is marked by the CONTEXT of the
// previous CHAIN_ENTER_MANAGED, not the previous CHAIN_ENTER_UNMANAGED.
//
struct ChainInfo
{
public:
ChainInfo() : m_rootFP(LEAF_MOST_FRAME), m_reason(CHAIN_NONE), m_fNeedEnterManagedChain(FALSE), m_fLeafNativeContextIsValid(FALSE) {}
void CancelUMChain() { m_reason = CHAIN_NONE; }
BOOL IsTrackingUMChain() { return (m_reason == CHAIN_ENTER_UNMANAGED); }
DT_CONTEXT m_leafNativeContext;
DT_CONTEXT m_leafManagedContext;
FramePointer m_rootFP;
CorDebugChainReason m_reason;
bool m_fNeedEnterManagedChain;
bool m_fLeafNativeContextIsValid;
};
//---------------------------------------------------------------------------------------
//
// This is a helper class used to store information during a stackwalk. Conceptually it is a simplified
// version of FrameInfo used on the LS in V2.
//
struct StackWalkInfo
{
public:
StackWalkInfo();
~StackWalkInfo();
// Reset all the per-frame information.
void ResetForNextFrame();
// During the stackwalk, we need to find out whether we should process the next stack frame or the
// next internal frame. These functions help us determine whether we have exhausted one or both
// types of frames. The stackwalk is finished when both types are exhausted.
bool ExhaustedAllFrames();
bool ExhaustedAllStackFrames();
bool ExhaustedAllInternalFrames();
// Simple helper function to get the current internal frame.
ICorDebugInternalFrame2 * GetCurrentInternalFrame();
// Check whether we are processing the first frame.
BOOL IsLeafFrame();
// Check whether we are skipping frames because of a child frame.
BOOL IsSkippingFrame();
// Indicates whether we are dealing with a converted frame.
// See code:CordbThread::ConvertFrameForILMethodWithoutMetadata.
BOOL HasConvertedFrame();
// Store the child frame we are currently trying to find the parent frame for.
// If this is NULL, then we are not skipping frames.
RSExtSmartPtr<ICorDebugNativeFrame2> m_pChildFrame;
// Store the converted frame, if any.
RSExtSmartPtr<ICorDebugInternalFrame2> m_pConvertedInternalFrame2;
// Store the array of internal frames. This is an array of RSExtSmartPtrs, and so each element
// is protected, and we only need to call Clear() to release each element and free all the memory.
RSExtPtrArray<ICorDebugInternalFrame2> m_ppInternalFrame2;
UINT32 m_cChain; // number of chains
UINT32 m_cFrame; // number of frames
UINT32 m_firstFrameInChain; // the index of the first frame in the current chain
UINT32 m_cInternalFrames; // number of internal frames
UINT32 m_curInternalFrame; // the index of the current internal frame being processed
CorDebugInternalFrameType m_internalFrameType;
bool m_fExhaustedAllStackFrames;
// Indicate whether we are processing an internal frame or a stack frame.
bool m_fProcessingInternalFrame;
// Indicate whether we should skip the current chain because it's a chain derived from a leaf frame
// of type TYPE_INTERNAL. This is the behaviour in V2.
// See code:DebuggerWalkStackProc.
bool m_fSkipChain;
// Indicate whether the current frame is the first frame we process.
bool m_fLeafFrame;
// Indicate whether we are processing a converted frame.
bool m_fHasConvertedFrame;
};
// A ShimStackWalk is deleted when a process is continued, or when the stack is changed in any way
// (e.g. SetIP, EnC, etc.).
void Populate();
void Clear();
// Get a FramePointer to mark the root boundary of a chain.
FramePointer GetFramePointerForChain(DT_CONTEXT * pContext);
FramePointer GetFramePointerForChain(ICorDebugInternalFrame2 * pInternalFrame2);
CorDebugInternalFrameType GetInternalFrameType(ICorDebugInternalFrame2 * pFrame2);
// Append a frame to the array.
void AppendFrame(ICorDebugFrame * pFrame, StackWalkInfo * pStackWalkInfo);
void AppendFrame(ICorDebugInternalFrame2 * pInternalFrame2, StackWalkInfo * pStackWalkInfo);
// Append a chain to the array.
void AppendChainWorker(StackWalkInfo * pStackWalkInfo,
DT_CONTEXT * pLeafContext,
FramePointer fpRoot,
CorDebugChainReason chainReason,
BOOL fIsManagedChain);
void AppendChain(ChainInfo * pChainInfo, StackWalkInfo * pStackWalkInfo);
// Save information on the ChainInfo regarding the current chain.
void SaveChainContext(ICorDebugStackWalk * pSW, ChainInfo * pChainInfo, DT_CONTEXT * pContext);
    // Check what we process next, an internal frame or a stack frame.
BOOL CheckInternalFrame(ICorDebugFrame * pNextStackFrame,
StackWalkInfo * pStackWalkInfo,
ICorDebugThread3 * pThread3,
ICorDebugStackWalk * pSW);
// Convert an ICDInternalFrame to another ICDInternalFrame due to IL methods without metadata.
// See code:CordbThread::ConvertFrameForILMethodWithoutMetadata.
BOOL ConvertInternalFrameToDynamicMethod(StackWalkInfo * pStackWalkInfo);
// Convert an ICDNativeFrame to an ICDInternalFrame due to IL methods without metadata.
// See code:CordbThread::ConvertFrameForILMethodWithoutMetadata.
BOOL ConvertStackFrameToDynamicMethod(ICorDebugFrame * pFrame, StackWalkInfo * pStackWalkInfo);
// Process an unmanaged chain.
BOOL ShouldTrackUMChain(StackWalkInfo * pswInfo);
void TrackUMChain(ChainInfo * pChainInfo, StackWalkInfo * pStackWalkInfo);
// Check whether the internal frame is a newly exposed type in Arrowhead. If so, then the shim should
// not expose it.
BOOL IsV3FrameType(CorDebugInternalFrameType type);
// Check whether the specified frame represents a dynamic method.
BOOL IsILFrameWithoutMetadata(ICorDebugFrame * pFrame);
CDynArray<ShimChain *> m_stackChains; // growable ordered array of chains and frames
CDynArray<ICorDebugFrame *> m_stackFrames;
ShimChainEnum * m_pChainEnumList; // linked list of ShimChainEnum and ShimFrameEnum
ShimFrameEnum * m_pFrameEnumList;
// the thread on which we are doing a stackwalk, i.e. the "owning" thread
RSExtSmartPtr<ShimProcess> m_pProcess;
RSExtSmartPtr<ICorDebugThread> m_pThread;
};
//---------------------------------------------------------------------------------------
//
// This class implements the deprecated ICDChain interface.
//
class ShimChain : public ICorDebugChain
{
public:
ShimChain(ShimStackWalk * pSW,
DT_CONTEXT * pContext,
FramePointer fpRoot,
UINT32 chainIndex,
UINT32 frameStartIndex,
UINT32 frameEndIndex,
CorDebugChainReason chainReason,
BOOL fIsManaged,
RSLock * pShimLock);
virtual ~ShimChain();
void Neuter();
BOOL IsNeutered();
//
// IUnknown
//
ULONG STDMETHODCALLTYPE AddRef();
ULONG STDMETHODCALLTYPE Release();
COM_METHOD QueryInterface(REFIID riid, void ** ppInterface);
//
// ICorDebugChain
//
COM_METHOD GetThread(ICorDebugThread ** ppThread);
COM_METHOD GetStackRange(CORDB_ADDRESS * pStart, CORDB_ADDRESS * pEnd);
COM_METHOD GetContext(ICorDebugContext ** ppContext);
COM_METHOD GetCaller(ICorDebugChain ** ppChain);
COM_METHOD GetCallee(ICorDebugChain ** ppChain);
COM_METHOD GetPrevious(ICorDebugChain ** ppChain);
COM_METHOD GetNext(ICorDebugChain ** ppChain);
COM_METHOD IsManaged(BOOL * pManaged);
COM_METHOD EnumerateFrames(ICorDebugFrameEnum ** ppFrames);
COM_METHOD GetActiveFrame(ICorDebugFrame ** ppFrame);
COM_METHOD GetRegisterSet(ICorDebugRegisterSet ** ppRegisters);
COM_METHOD GetReason(CorDebugChainReason * pReason);
//
// accessors
//
// Get the owning ShimStackWalk.
ShimStackWalk * GetShimStackWalk();
// Get the first and last index of the frame owned by this chain. This class itself doesn't store the
// frames. Rather, the frames are stored on the ShimStackWalk. This class just stores the indices.
// Note that the indices are [firstIndex, lastIndex), i.e. the last index is exclusive.
UINT32 GetFirstFrameIndex();
UINT32 GetLastFrameIndex();
private:
// A chain describes a stack range within the stack. This includes a CONTEXT at the start (leafmost)
// end of the chain, and a frame pointer where the chain ends (rootmost). This stack range is exposed
// publicly via ICDChain::GetStackRange(), and can be used to stitch managed and native stack frames
// together into a unified stack.
DT_CONTEXT m_context; // the leaf end of the chain
FramePointer m_fpRoot; // the root end of the chain
ShimStackWalk * m_pStackWalk; // the owning ShimStackWalk
Volatile<ULONG> m_refCount;
// The 0-based index of this chain in the ShimStackWalk's chain array (m_pStackWalk->m_stackChains).
UINT32 m_chainIndex;
// The 0-based index of the first frame owned by this chain in the ShimStackWalk's frame array
    // (m_pStackWalk->m_stackFrames). See code:ShimChain::GetFirstFrameIndex().
UINT32 m_frameStartIndex;
// The 0-based index of the last frame owned by this chain in the ShimStackWalk's frame array
    // (m_pStackWalk->m_stackFrames). This index is exclusive. See code:ShimChain::GetLastFrameIndex().
UINT32 m_frameEndIndex;
CorDebugChainReason m_chainReason;
BOOL m_fIsManaged; // indicates whether this chain contains managed frames
BOOL m_fIsNeutered;
RSLock * m_pShimLock; // shim lock from ShimProcess to protect neuteredness checks
};
//---------------------------------------------------------------------------------------
//
// This class implements the deprecated ICDChainEnum interface.
//
class ShimChainEnum : public ICorDebugChainEnum
{
public:
ShimChainEnum(ShimStackWalk * pSW, RSLock * pShimLock);
virtual ~ShimChainEnum();
void Neuter();
BOOL IsNeutered();
//
// IUnknown
//
ULONG STDMETHODCALLTYPE AddRef();
ULONG STDMETHODCALLTYPE Release();
COM_METHOD QueryInterface(REFIID riid, void ** ppInterface);
//
// ICorDebugEnum
//
COM_METHOD Skip(ULONG celt);
COM_METHOD Reset();
COM_METHOD Clone(ICorDebugEnum ** ppEnum);
COM_METHOD GetCount(ULONG * pcChains);
//
// ICorDebugChainEnum
//
COM_METHOD Next(ULONG cChains, ICorDebugChain * rgpChains[], ULONG * pcChainsFetched);
//
// accessors
//
// used to link ShimChainEnums in a list
ShimChainEnum * GetNext();
void SetNext(ShimChainEnum * pNext);
private:
ShimStackWalk * m_pStackWalk; // the owning ShimStackWalk
// This points to the next ShimChainEnum in the linked list of ShimChainEnums to be cleaned up.
// The head of the list is on the ShimStackWalk (m_pStackWalk->m_pChainEnumList).
ShimChainEnum * m_pNext;
UINT32 m_currentChainIndex; // the index of the current ShimChain being enumerated
Volatile<ULONG> m_refCount;
BOOL m_fIsNeutered;
RSLock * m_pShimLock; // shim lock from ShimProcess to protect neuteredness checks
};
//---------------------------------------------------------------------------------------
//
// This class implements the deprecated ICDFrameEnum interface.
//
class ShimFrameEnum : public ICorDebugFrameEnum
{
public:
ShimFrameEnum(ShimStackWalk * pSW, ShimChain * pChain, UINT32 frameStartIndex, UINT32 frameEndIndex, RSLock * pShimLock);
virtual ~ShimFrameEnum();
void Neuter();
BOOL IsNeutered();
//
// IUnknown
//
ULONG STDMETHODCALLTYPE AddRef();
ULONG STDMETHODCALLTYPE Release();
COM_METHOD QueryInterface(REFIID riid, void ** ppInterface);
//
// ICorDebugEnum
//
COM_METHOD Skip(ULONG celt);
COM_METHOD Reset();
COM_METHOD Clone(ICorDebugEnum ** ppEnum);
COM_METHOD GetCount(ULONG * pcFrames);
//
// ICorDebugFrameEnum
//
COM_METHOD Next(ULONG cFrames, ICorDebugFrame * rgpFrames[], ULONG * pcFramesFetched);
//
// accessors
//
    // used to link ShimFrameEnums in a list
ShimFrameEnum * GetNext();
void SetNext(ShimFrameEnum * pNext);
private:
ShimStackWalk * m_pStackWalk; // the owning ShimStackWalk
ShimChain * m_pChain; // the owning ShimChain
RSLock * m_pShimLock; // shim lock from ShimProcess to protect neuteredness checks
// This points to the next ShimFrameEnum in the linked list of ShimFrameEnums to be cleaned up.
// The head of the list is on the ShimStackWalk (m_pStackWalk->m_pFrameEnumList).
ShimFrameEnum * m_pNext;
UINT32 m_currentFrameIndex; // the current ICDFrame being enumerated
UINT32 m_endFrameIndex; // the last index (exclusive) of the frame owned by the chain;
// see code:ShimChain::GetLastFrameIndex
Volatile<ULONG> m_refCount;
BOOL m_fIsNeutered;
};
#endif // SHIMPRIV_H
| // Licensed to the .NET Foundation under one or more agreements.
// The .NET Foundation licenses this file to you under the MIT license.
//*****************************************************************************
// shimprivate.h
//
//
// private header for RS shim which bridges from V2 to V3.
//*****************************************************************************
#ifndef SHIMPRIV_H
#define SHIMPRIV_H
#include "helpers.h"
#include "shimdatatarget.h"
#include <shash.h>
// Forward declarations
class CordbWin32EventThread;
class Cordb;
class ShimStackWalk;
class ShimChain;
class ShimChainEnum;
class ShimFrameEnum;
// This struct specifies that it's a hash table of ShimStackWalk * using ICorDebugThread as the key.
struct ShimStackWalkHashTableTraits : public PtrSHashTraits<ShimStackWalk, ICorDebugThread *> {};
typedef SHash<ShimStackWalkHashTableTraits> ShimStackWalkHashTable;
//---------------------------------------------------------------------------------------
//
// Simple struct for storing a void *. This is to be used with a SHash hash table.
//
struct DuplicateCreationEventEntry
{
public:
DuplicateCreationEventEntry(void * pKey) : m_pKey(pKey) {};
// These functions must be defined for DuplicateCreationEventsHashTableTraits.
void * GetKey() {return m_pKey;};
static UINT32 Hash(void * pKey) {return (UINT32)(size_t)pKey;};
private:
void * m_pKey;
};
// This struct specifies that it's a hash table of DuplicateCreationEventEntry * using a void * as the key.
// The void * is expected to be an ICDProcess/ICDAppDomain/ICDThread/ICDAssembly/ICDThread interface pointer.
struct DuplicateCreationEventsHashTableTraits : public PtrSHashTraits<DuplicateCreationEventEntry, void *> {};
typedef SHash<DuplicateCreationEventsHashTableTraits> DuplicateCreationEventsHashTable;
//
// Callback that shim provides, which then queues up the events.
//
class ShimProxyCallback :
public ICorDebugManagedCallback,
public ICorDebugManagedCallback2,
public ICorDebugManagedCallback3,
public ICorDebugManagedCallback4
{
ShimProcess * m_pShim; // weak reference
LONG m_cRef;
public:
ShimProxyCallback(ShimProcess * pShim);
virtual ~ShimProxyCallback() {}
// Implement IUnknown
ULONG STDMETHODCALLTYPE AddRef();
ULONG STDMETHODCALLTYPE Release();
COM_METHOD QueryInterface(REFIID riid, void **ppInterface);
//
// Implementation of ICorDebugManagedCallback
//
COM_METHOD Breakpoint( ICorDebugAppDomain *pAppDomain,
ICorDebugThread *pThread,
ICorDebugBreakpoint *pBreakpoint);
COM_METHOD StepComplete( ICorDebugAppDomain *pAppDomain,
ICorDebugThread *pThread,
ICorDebugStepper *pStepper,
CorDebugStepReason reason);
COM_METHOD Break( ICorDebugAppDomain *pAppDomain,
ICorDebugThread *thread);
COM_METHOD Exception( ICorDebugAppDomain *pAppDomain,
ICorDebugThread *pThread,
BOOL unhandled);
COM_METHOD EvalComplete( ICorDebugAppDomain *pAppDomain,
ICorDebugThread *pThread,
ICorDebugEval *pEval);
COM_METHOD EvalException( ICorDebugAppDomain *pAppDomain,
ICorDebugThread *pThread,
ICorDebugEval *pEval);
COM_METHOD CreateProcess( ICorDebugProcess *pProcess);
void QueueCreateProcess( ICorDebugProcess *pProcess);
COM_METHOD ExitProcess( ICorDebugProcess *pProcess);
COM_METHOD CreateThread( ICorDebugAppDomain *pAppDomain, ICorDebugThread *thread);
COM_METHOD ExitThread( ICorDebugAppDomain *pAppDomain, ICorDebugThread *thread);
COM_METHOD LoadModule( ICorDebugAppDomain *pAppDomain, ICorDebugModule *pModule);
void FakeLoadModule(ICorDebugAppDomain *pAppDomain, ICorDebugModule *pModule);
COM_METHOD UnloadModule( ICorDebugAppDomain *pAppDomain, ICorDebugModule *pModule);
COM_METHOD LoadClass( ICorDebugAppDomain *pAppDomain, ICorDebugClass *c);
COM_METHOD UnloadClass( ICorDebugAppDomain *pAppDomain, ICorDebugClass *c);
COM_METHOD DebuggerError( ICorDebugProcess *pProcess, HRESULT errorHR, DWORD errorCode);
COM_METHOD LogMessage( ICorDebugAppDomain *pAppDomain,
ICorDebugThread *pThread,
LONG lLevel,
_In_ LPWSTR pLogSwitchName,
_In_ LPWSTR pMessage);
COM_METHOD LogSwitch( ICorDebugAppDomain *pAppDomain,
ICorDebugThread *pThread,
LONG lLevel,
ULONG ulReason,
_In_ LPWSTR pLogSwitchName,
_In_ LPWSTR pParentName);
COM_METHOD CreateAppDomain(ICorDebugProcess *pProcess,
ICorDebugAppDomain *pAppDomain);
COM_METHOD ExitAppDomain(ICorDebugProcess *pProcess,
ICorDebugAppDomain *pAppDomain);
COM_METHOD LoadAssembly(ICorDebugAppDomain *pAppDomain,
ICorDebugAssembly *pAssembly);
COM_METHOD UnloadAssembly(ICorDebugAppDomain *pAppDomain,
ICorDebugAssembly *pAssembly);
COM_METHOD ControlCTrap(ICorDebugProcess *pProcess);
COM_METHOD NameChange(ICorDebugAppDomain *pAppDomain, ICorDebugThread *pThread);
COM_METHOD UpdateModuleSymbols( ICorDebugAppDomain *pAppDomain,
ICorDebugModule *pModule,
IStream *pSymbolStream);
COM_METHOD EditAndContinueRemap( ICorDebugAppDomain *pAppDomain,
ICorDebugThread *pThread,
ICorDebugFunction *pFunction,
BOOL fAccurate);
COM_METHOD BreakpointSetError( ICorDebugAppDomain *pAppDomain,
ICorDebugThread *pThread,
ICorDebugBreakpoint *pBreakpoint,
DWORD dwError);
///
/// Implementation of ICorDebugManagedCallback2
///
COM_METHOD FunctionRemapOpportunity( ICorDebugAppDomain *pAppDomain,
ICorDebugThread *pThread,
ICorDebugFunction *pOldFunction,
ICorDebugFunction *pNewFunction,
ULONG32 oldILOffset);
COM_METHOD CreateConnection(ICorDebugProcess *pProcess, CONNID dwConnectionId, _In_ LPWSTR pConnName);
COM_METHOD ChangeConnection(ICorDebugProcess *pProcess, CONNID dwConnectionId );
COM_METHOD DestroyConnection(ICorDebugProcess *pProcess, CONNID dwConnectionId);
COM_METHOD Exception(ICorDebugAppDomain *pAppDomain,
ICorDebugThread *pThread,
ICorDebugFrame *pFrame,
ULONG32 nOffset,
CorDebugExceptionCallbackType dwEventType,
DWORD dwFlags );
COM_METHOD ExceptionUnwind(ICorDebugAppDomain *pAppDomain,
ICorDebugThread *pThread,
CorDebugExceptionUnwindCallbackType dwEventType,
DWORD dwFlags);
COM_METHOD FunctionRemapComplete( ICorDebugAppDomain *pAppDomain,
ICorDebugThread *pThread,
ICorDebugFunction *pFunction);
COM_METHOD MDANotification(ICorDebugController * pController, ICorDebugThread *pThread, ICorDebugMDA * pMDA);
///
/// Implementation of ICorDebugManagedCallback3
///
// Implementation of ICorDebugManagedCallback3::CustomNotification
COM_METHOD CustomNotification(ICorDebugThread * pThread, ICorDebugAppDomain * pAppDomain);
///
/// Implementation of ICorDebugManagedCallback4
///
// Implementation of ICorDebugManagedCallback4::BeforeGarbageCollection
COM_METHOD BeforeGarbageCollection(ICorDebugProcess* pProcess);
// Implementation of ICorDebugManagedCallback4::AfterGarbageCollection
COM_METHOD AfterGarbageCollection(ICorDebugProcess* pProcess);
// Implementation of ICorDebugManagedCallback4::DataBreakpoint
COM_METHOD DataBreakpoint(ICorDebugProcess* pProcess, ICorDebugThread* pThread, BYTE* pContext, ULONG32 contextSize);
};
//
// Base class for event queue. These are nested into a singly linked list.
// Shim maintains event queue
//
class ManagedEvent
{
public:
// Need virtual dtor since this is a base class.
virtual ~ManagedEvent();
#ifdef _DEBUG
// For debugging, get a pointer value that can identify the type of this event.
void * GetDebugCookie();
#endif
// We'll have a lot of derived classes of ManagedEvent, and so encapsulating the arguments
// for the Dispatch() function lets us juggle them around easily without hitting every signature.
class DispatchArgs
{
public:
DispatchArgs(ICorDebugManagedCallback * pCallback1, ICorDebugManagedCallback2 * pCallback2, ICorDebugManagedCallback3 * pCallback3, ICorDebugManagedCallback4 * pCallback4);
ICorDebugManagedCallback * GetCallback1();
ICorDebugManagedCallback2 * GetCallback2();
ICorDebugManagedCallback3 * GetCallback3();
ICorDebugManagedCallback4 * GetCallback4();
protected:
ICorDebugManagedCallback * m_pCallback1;
ICorDebugManagedCallback2 * m_pCallback2;
ICorDebugManagedCallback3 * m_pCallback3;
ICorDebugManagedCallback4 * m_pCallback4;
};
// Returns: value of callback from end-user
virtual HRESULT Dispatch(DispatchArgs args) = 0;
// Returns 0 if none.
DWORD GetOSTid();
protected:
// Ctor for events with thread-affinity
ManagedEvent(ICorDebugThread * pThread);
// Ctor for events without thread affinity.
ManagedEvent();
friend class ManagedEventQueue;
ManagedEvent * m_pNext;
DWORD m_dwThreadId;
};
//
// Queue of managed events.
// Shim can use this to collect managed debug events, queue them, and then drain the event
// queue when a sync-complete occurs.
// Event queue gets initialized with a lock and will lock internally.
class ManagedEventQueue
{
public:
ManagedEventQueue();
void Init(RSLock * pLock);
// Remove event from the top. Caller then takes ownership of Event and will call Delete on it.
// Caller checks IsEmpty() first.
ManagedEvent * Dequeue();
// Queue owns the event and will delete it (unless it's dequeued first).
void QueueEvent(ManagedEvent * pEvent);
// Test if event queue is empty
bool IsEmpty();
// Empty event queue and delete all objects
void DeleteAll();
// Nothrows
BOOL HasQueuedCallbacks(ICorDebugThread * pThread);
// Save the current queue and start with a new empty queue
void SuspendQueue();
// Restore the saved queue onto the end of the current queue
void RestoreSuspendedQueue();
protected:
// The lock to be used for synchronizing all access to the queue
RSLock * m_pLock;
// If empty, First + Last are both NULL.
// Else first points to the head of the queue; and Last points to the end of the queue.
ManagedEvent * m_pFirstEvent;
ManagedEvent * m_pLastEvent;
};
//---------------------------------------------------------------------------------------
//
// Shim's layer on top of a process.
//
// Notes:
// This contains a V3 ICorDebugProcess, and provides V2 ICDProcess functionality.
//
class ShimProcess
{
// Delete via Ref count semantics.
~ShimProcess();
public:
// Initialize ref count is 0.
ShimProcess();
// Lifetime semantics handled by reference counting.
void AddRef();
void Release();
// Release all resources. Can be called multiple times.
void Dispose();
// Initialization phases.
// 1. allocate new ShimProcess(). This lets us spin up a Win32 EventThread, which can then
// be used to
// 2. Call ShimProcess::CreateProcess/DebugActiveProcess. This will call CreateAndStartWin32ET to
    // create the w32et.
// 3. Create OS-debugging pipeline. This establishes the physical OS process and gets us a pid/handle
// 4. pShim->InitializeDataTarget - this creates a reader/writer abstraction around the OS process.
// 5. pShim->SetProcess() - this connects the Shim to the ICDProcess object.
HRESULT InitializeDataTarget(const ProcessDescriptor * pProcessDescriptor);
void SetProcess(ICorDebugProcess * pProcess);
//-----------------------------------------------------------
// Creation
//-----------------------------------------------------------
static HRESULT CreateProcess(
Cordb * pCordb,
ICorDebugRemoteTarget * pRemoteTarget,
LPCWSTR programName,
_In_z_ LPWSTR programArgs,
LPSECURITY_ATTRIBUTES lpProcessAttributes,
LPSECURITY_ATTRIBUTES lpThreadAttributes,
BOOL bInheritHandles,
DWORD dwCreationFlags,
PVOID lpEnvironment,
LPCWSTR lpCurrentDirectory,
LPSTARTUPINFOW lpStartupInfo,
LPPROCESS_INFORMATION lpProcessInformation,
CorDebugCreateProcessFlags corDebugFlags
);
static HRESULT DebugActiveProcess(
Cordb * pCordb,
ICorDebugRemoteTarget * pRemoteTarget,
const ProcessDescriptor * pProcessDescriptor,
BOOL win32Attach
);
// Locates the DAC module adjacent to DBI
static HMODULE GetDacModule();
//
// Functions used by CordbProcess
//
// Determine if the calling thread is the win32 event thread.
bool IsWin32EventThread();
// Expose the W32ET thread to the CordbProcess so that it can emulate V2 behavior
CordbWin32EventThread * GetWin32EventThread();
// Accessor wrapper to mark whether we're interop-debugging.
void SetIsInteropDebugging(bool fIsInteropDebugging);
// Handle a debug event.
HRESULT HandleWin32DebugEvent(const DEBUG_EVENT * pEvent);
ManagedEventQueue * GetManagedEventQueue();
ManagedEvent * DequeueManagedEvent();
ShimProxyCallback * GetShimCallback();
    // Begin queuing the fake attach events.
void BeginQueueFakeAttachEvents();
// Queue fake attach events if needed
void QueueFakeAttachEventsIfNeeded(bool fRealCreateProcessEvent);
// Actually do the work to queue the fake attach events.
void QueueFakeAttachEvents();
    // Helper to queue fake assembly and module events
void QueueFakeAssemblyAndModuleEvent(ICorDebugAssembly * pAssembly);
// Queue fake thread-create events on attach. No ordering.
HRESULT QueueFakeThreadAttachEventsNoOrder();
bool IsThreadSuspendedOrHijacked(ICorDebugThread * pThread);
// Expose m_attached to CordbProcess.
bool GetAttached();
// We need to know whether we are in the CreateProcess callback to be able to
// return the v2.0 hresults from code:CordbProcess::SetDesiredNGENCompilerFlags
// when we are using the shim.
//
// Expose m_fInCreateProcess
bool GetInCreateProcess();
void SetInCreateProcess(bool value);
// We need to know whether we are in the FakeLoadModule callback to be able to
// return the v2.0 hresults from code:CordbModule::SetJITCompilerFlags when
// we are using the shim.
//
// Expose m_fInLoadModule
bool GetInLoadModule();
void SetInLoadModule(bool value);
// When we get a continue, we need to clear the flags indicating we're still in a callback
void NotifyOnContinue ();
// The RS calls this function when the stack is about to be changed in any way, e.g. continue, SetIP,
// etc.
void NotifyOnStackInvalidate();
// Helpers to filter HRs to emulate V2 error codes.
HRESULT FilterSetNgenHresult(HRESULT hr);
HRESULT FilterSetJitFlagsHresult(HRESULT hr);
//.............................................................
    // Lookup or create a ShimStackWalk for the specified thread. ShimStackWalk and ICorDebugThread have
// a 1:1 relationship.
ShimStackWalk * LookupOrCreateShimStackWalk(ICorDebugThread * pThread);
// Clear all ShimStackWalks and flush all the caches.
void ClearAllShimStackWalk();
// Get the corresponding ICDProcess object.
ICorDebugProcess * GetProcess();
// Get the data target to access the debuggee.
ICorDebugMutableDataTarget * GetDataTarget();
// Get the native event pipeline
INativeEventPipeline * GetNativePipeline();
// Are we interop-debugging?
bool IsInteropDebugging();
// Finish all the necessary initialization work and queue up any necessary fake attach events before
// dispatching an event.
void PreDispatchEvent(bool fRealCreateProcessEvent = false);
// Retrieve the IP address and the port number of the debugger proxy.
MachineInfo GetMachineInfo();
// Add an entry in the duplicate creation event hash table for the specified key.
void AddDuplicateCreationEvent(void * pKey);
// Check if a duplicate creation event entry exists for the specified key. If so, remove it.
bool RemoveDuplicateCreationEventIfPresent(void * pKey);
void SetMarkAttachPendingEvent();
void SetTerminatingEvent();
RSLock * GetShimLock();
protected:
// Reference count.
LONG m_ref;
//
// Helper functions
//
HRESULT CreateAndStartWin32ET(Cordb * pCordb);
//
// Synchronization events to ensure that AttachPending bit is marked before DebugActiveProcess
// returns or debugger is detaching
//
HANDLE m_markAttachPendingEvent;
HANDLE m_terminatingEvent;
//
// Event Queues
//
// Shim maintains event queue to emulate V2 semantics.
    // In V2, ICorDebug internally queued debug events and dispatched them
// once the debuggee was synchronized. In V3, ICorDebug dispatches events immediately.
// The event queue is moved into the shim to build V2 semantics of V3 behavior.
ManagedEventQueue m_eventQueue;
// Lock to protect Shim data structures. This is currently a small lock that
// protects leaf-level structures, but it may grow to protect larger things.
RSLock m_ShimLock;
    // Serializes ShimProcess::Dispose() with other ShimProcess functions. For now, this
// cannot be the same as m_ShimLock. See LL_SHIM_PROCESS_DISPOSE_LOCK for more
// information
RSLock m_ShimProcessDisposeLock;
// Sticky bit to do lazy-initialization on the first managed event.
bool m_fFirstManagedEvent;
RSExtSmartPtr<ShimProxyCallback> m_pShimCallback;
    // This is for emulating V2 Attach. Initialized to false, and then set to true if we need to send fake attach events.
// Reset to false once the events are sent. See code:ShimProcess::QueueFakeAttachEventsIfNeeded
bool m_fNeedFakeAttachEvents;
// True if the process was created from an attach (DebugActiveProcess); False if it was launched (CreateProcess)
// This is used to send an Attach IPC event, and also used to provide more specific error codes.
bool m_attached;
// True iff we are in the shim's CreateProcess callback. This is used to determine which hresult to
// return from code:CordbProcess::SetDesiredNGENCompilerFlags so we correctly emulate the behavior of v2.0.
// This is set at the beginning of the callback and cleared in code:CordbProcess::ContinueInternal.
bool m_fInCreateProcess;
// True iff we are in the shim's FakeLoadModule callback. This is used to determine which hresult to
// return from code:CordbModule::SetJITCompilerFlags so we correctly emulate the behavior of v2.0.
// This is set at the beginning of the callback and cleared in code:CordbProcess::ContinueInternal.
bool m_fInLoadModule;
//
// Data
//
// Pointer to CordbProcess.
// @dbgtodo shim: We'd like this to eventually go through public interfaces (ICorDebugProcess)
IProcessShimHooks * m_pProcess; // Reference is kept by m_pIProcess;
RSExtSmartPtr<ICorDebugProcess> m_pIProcess;
    // Win32EventThread, which is the thread that uses the native debug API.
CordbWin32EventThread * m_pWin32EventThread;
// Actual data-target. Since we're shimming V2 scenarios, and V3 is always
// live-debugging, this is always a live data-target.
RSExtSmartPtr<ShimDataTarget> m_pLiveDataTarget;
// If true, the shim is emulating interop-debugging
// If false, the shim is emulating managed-only debugging.
// Both managed and native debugging have the same underlying pipeline (built
// on native-debug events). So the only difference is how they handle those events.
bool m_fIsInteropDebugging;
// true iff Dispose() was called. Consult this and do your work under m_ShimProcessDisposeLock
// to serialize yourself against a call to Dispose(). This protects your work
// from the user doing a Debugger Detach in the middle.
bool m_fIsDisposed;
//.............................................................................
//
// Members used for handling native events when managed-only debugging.
//
//.............................................................................
// Default handler for native events when managed-only debugging.
void DefaultEventHandler(const DEBUG_EVENT * pEvent, DWORD * pdwContinueStatus);
// Given a debug event, track the file handles.
void TrackFileHandleForDebugEvent(const DEBUG_EVENT * pEvent);
// Have we gotten the loader breakpoint yet?
// A Debugger needs to do special work to skip the loader breakpoint,
// and that's also when it should dispatch the faked managed attach events.
bool m_loaderBPReceived;
// Raw callback for ContinueStatusChanged from Data-target.
static HRESULT ContinueStatusChanged(void * pUserData, DWORD dwThreadId, CORDB_CONTINUE_STATUS dwContinueStatus);
// Real worker to update ContinueStatusChangedData
HRESULT ContinueStatusChangedWorker(DWORD dwThreadId, CORDB_CONTINUE_STATUS dwContinueStatus);
struct ContinueStatusChangedData
{
void Clear();
bool IsSet();
// Tid of Thread changed
DWORD m_dwThreadId;
// New continue status.
CORDB_CONTINUE_STATUS m_status;
} m_ContinueStatusChangedData;
// the hash table of ShimStackWalks
ShimStackWalkHashTable * m_pShimStackWalkHashTable;
// the hash table of duplicate creation events
DuplicateCreationEventsHashTable * m_pDupeEventsHashTable;
MachineInfo m_machineInfo;
};
//---------------------------------------------------------------------------------------
//
// This is the container class of ShimChains, ICorDebugFrames, ShimChainEnums, and ShimFrameEnums.
// It has a 1:1 relationship with ICorDebugThreads. Upon creation, this class walks the entire stack and
// caches all the stack frames and chains. The enumerators are created on demand.
//
class ShimStackWalk
{
public:
ShimStackWalk(ShimProcess * pProcess, ICorDebugThread * pThread);
~ShimStackWalk();
// These functions do not adjust the reference count.
ICorDebugThread * GetThread();
ShimChain * GetChain(UINT32 index);
ICorDebugFrame * GetFrame(UINT32 index);
// Get the number of frames and chains.
ULONG GetChainCount();
ULONG GetFrameCount();
RSLock * GetShimLock();
// Add ICDChainEnum and ICDFrameEnum.
void AddChainEnum(ShimChainEnum * pChainEnum);
void AddFrameEnum(ShimFrameEnum * pFrameEnum);
// The next two functions are for ShimStackWalkHashTableTraits.
ICorDebugThread * GetKey();
static UINT32 Hash(ICorDebugThread * pThread);
// Check if the specified frame is the leaf frame according to the V2 definition.
BOOL IsLeafFrame(ICorDebugFrame * pFrame);
// Check if the two specified frames are the same. This function checks the SPs, frame address, etc.
// instead of just checking for pointer equality.
BOOL IsSameFrame(ICorDebugFrame * pLeft, ICorDebugFrame * pRight);
// The following functions are entry point into the ShimStackWalk. They are called by the RS.
void EnumerateChains(ICorDebugChainEnum ** ppChainEnum);
void GetActiveChain(ICorDebugChain ** ppChain);
void GetActiveFrame(ICorDebugFrame ** ppFrame);
void GetActiveRegisterSet(ICorDebugRegisterSet ** ppRegisterSet);
void GetChainForFrame(ICorDebugFrame * pFrame, ICorDebugChain ** ppChain);
void GetCallerForFrame(ICorDebugFrame * pFrame, ICorDebugFrame ** ppCallerFrame);
void GetCalleeForFrame(ICorDebugFrame * pFrame, ICorDebugFrame ** ppCalleeFrame);
private:
//---------------------------------------------------------------------------------------
//
// This is a helper class used to store the information of a chain during a stackwalk. A chain is marked
// by the CONTEXT on the leaf boundary and a FramePointer on the root boundary. Also, notice that we
// are keeping two CONTEXTs. This is because some chain types may cancel a previous unmanaged chain.
// For example, a CHAIN_FUNC_EVAL chain cancels any CHAIN_ENTER_UNMANAGED chain immediately preceding
// it. In this case, the leaf boundary of the CHAIN_FUNC_EVAL chain is marked by the CONTEXT of the
// previous CHAIN_ENTER_MANAGED, not the previous CHAIN_ENTER_UNMANAGED.
//
struct ChainInfo
{
public:
ChainInfo() : m_rootFP(LEAF_MOST_FRAME), m_reason(CHAIN_NONE), m_fNeedEnterManagedChain(FALSE), m_fLeafNativeContextIsValid(FALSE) {}
void CancelUMChain() { m_reason = CHAIN_NONE; }
BOOL IsTrackingUMChain() { return (m_reason == CHAIN_ENTER_UNMANAGED); }
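        // Sketch of the cancellation described in the comment above, using only the members declared
        // here (the surrounding walk logic is hypothetical):
        //
        //     ChainInfo ci;
        //     ci.m_reason = CHAIN_ENTER_UNMANAGED;   // start tracking an unmanaged chain
        //     // ... a CHAIN_FUNC_EVAL chain is encountered next ...
        //     if (ci.IsTrackingUMChain())
        //     {
        //         ci.CancelUMChain();                // the pending unmanaged chain is dropped
        //     }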
DT_CONTEXT m_leafNativeContext;
DT_CONTEXT m_leafManagedContext;
FramePointer m_rootFP;
CorDebugChainReason m_reason;
bool m_fNeedEnterManagedChain;
bool m_fLeafNativeContextIsValid;
};
//---------------------------------------------------------------------------------------
//
// This is a helper class used to store information during a stackwalk. Conceptually it is a simplified
// version of FrameInfo used on the LS in V2.
//
struct StackWalkInfo
{
public:
StackWalkInfo();
~StackWalkInfo();
// Reset all the per-frame information.
void ResetForNextFrame();
// During the stackwalk, we need to find out whether we should process the next stack frame or the
// next internal frame. These functions help us determine whether we have exhausted one or both
// types of frames. The stackwalk is finished when both types are exhausted.
bool ExhaustedAllFrames();
bool ExhaustedAllStackFrames();
bool ExhaustedAllInternalFrames();
// Simple helper function to get the current internal frame.
ICorDebugInternalFrame2 * GetCurrentInternalFrame();
// Check whether we are processing the first frame.
BOOL IsLeafFrame();
// Check whether we are skipping frames because of a child frame.
BOOL IsSkippingFrame();
// Indicates whether we are dealing with a converted frame.
// See code:CordbThread::ConvertFrameForILMethodWithoutMetadata.
BOOL HasConvertedFrame();
// Store the child frame we are currently trying to find the parent frame for.
// If this is NULL, then we are not skipping frames.
RSExtSmartPtr<ICorDebugNativeFrame2> m_pChildFrame;
// Store the converted frame, if any.
RSExtSmartPtr<ICorDebugInternalFrame2> m_pConvertedInternalFrame2;
// Store the array of internal frames. This is an array of RSExtSmartPtrs, and so each element
// is protected, and we only need to call Clear() to release each element and free all the memory.
RSExtPtrArray<ICorDebugInternalFrame2> m_ppInternalFrame2;
UINT32 m_cChain; // number of chains
UINT32 m_cFrame; // number of frames
UINT32 m_firstFrameInChain; // the index of the first frame in the current chain
UINT32 m_cInternalFrames; // number of internal frames
UINT32 m_curInternalFrame; // the index of the current internal frame being processed
CorDebugInternalFrameType m_internalFrameType;
bool m_fExhaustedAllStackFrames;
// Indicate whether we are processing an internal frame or a stack frame.
bool m_fProcessingInternalFrame;
// Indicate whether we should skip the current chain because it's a chain derived from a leaf frame
// of type TYPE_INTERNAL. This is the behaviour in V2.
// See code:DebuggerWalkStackProc.
bool m_fSkipChain;
// Indicate whether the current frame is the first frame we process.
bool m_fLeafFrame;
// Indicate whether we are processing a converted frame.
bool m_fHasConvertedFrame;
};
// A ShimStackWalk is deleted when a process is continued, or when the stack is changed in any way
// (e.g. SetIP, EnC, etc.).
void Populate();
void Clear();
// Get a FramePointer to mark the root boundary of a chain.
FramePointer GetFramePointerForChain(DT_CONTEXT * pContext);
FramePointer GetFramePointerForChain(ICorDebugInternalFrame2 * pInternalFrame2);
CorDebugInternalFrameType GetInternalFrameType(ICorDebugInternalFrame2 * pFrame2);
// Append a frame to the array.
void AppendFrame(ICorDebugFrame * pFrame, StackWalkInfo * pStackWalkInfo);
void AppendFrame(ICorDebugInternalFrame2 * pInternalFrame2, StackWalkInfo * pStackWalkInfo);
// Append a chain to the array.
void AppendChainWorker(StackWalkInfo * pStackWalkInfo,
DT_CONTEXT * pLeafContext,
FramePointer fpRoot,
CorDebugChainReason chainReason,
BOOL fIsManagedChain);
void AppendChain(ChainInfo * pChainInfo, StackWalkInfo * pStackWalkInfo);
// Save information on the ChainInfo regarding the current chain.
void SaveChainContext(ICorDebugStackWalk * pSW, ChainInfo * pChainInfo, DT_CONTEXT * pContext);
    // Check whether we process an internal frame or a stack frame next.
BOOL CheckInternalFrame(ICorDebugFrame * pNextStackFrame,
StackWalkInfo * pStackWalkInfo,
ICorDebugThread3 * pThread3,
ICorDebugStackWalk * pSW);
// Convert an ICDInternalFrame to another ICDInternalFrame due to IL methods without metadata.
// See code:CordbThread::ConvertFrameForILMethodWithoutMetadata.
BOOL ConvertInternalFrameToDynamicMethod(StackWalkInfo * pStackWalkInfo);
// Convert an ICDNativeFrame to an ICDInternalFrame due to IL methods without metadata.
// See code:CordbThread::ConvertFrameForILMethodWithoutMetadata.
BOOL ConvertStackFrameToDynamicMethod(ICorDebugFrame * pFrame, StackWalkInfo * pStackWalkInfo);
// Process an unmanaged chain.
BOOL ShouldTrackUMChain(StackWalkInfo * pswInfo);
void TrackUMChain(ChainInfo * pChainInfo, StackWalkInfo * pStackWalkInfo);
// Check whether the internal frame is a newly exposed type in Arrowhead. If so, then the shim should
// not expose it.
BOOL IsV3FrameType(CorDebugInternalFrameType type);
// Check whether the specified frame represents a dynamic method.
BOOL IsILFrameWithoutMetadata(ICorDebugFrame * pFrame);
CDynArray<ShimChain *> m_stackChains; // growable ordered array of chains and frames
CDynArray<ICorDebugFrame *> m_stackFrames;
ShimChainEnum * m_pChainEnumList; // linked list of ShimChainEnum and ShimFrameEnum
ShimFrameEnum * m_pFrameEnumList;
// the thread on which we are doing a stackwalk, i.e. the "owning" thread
RSExtSmartPtr<ShimProcess> m_pProcess;
RSExtSmartPtr<ICorDebugThread> m_pThread;
};
//---------------------------------------------------------------------------------------
//
// This class implements the deprecated ICDChain interface.
//
class ShimChain : public ICorDebugChain
{
public:
ShimChain(ShimStackWalk * pSW,
DT_CONTEXT * pContext,
FramePointer fpRoot,
UINT32 chainIndex,
UINT32 frameStartIndex,
UINT32 frameEndIndex,
CorDebugChainReason chainReason,
BOOL fIsManaged,
RSLock * pShimLock);
virtual ~ShimChain();
void Neuter();
BOOL IsNeutered();
//
// IUnknown
//
ULONG STDMETHODCALLTYPE AddRef();
ULONG STDMETHODCALLTYPE Release();
COM_METHOD QueryInterface(REFIID riid, void ** ppInterface);
//
// ICorDebugChain
//
COM_METHOD GetThread(ICorDebugThread ** ppThread);
COM_METHOD GetStackRange(CORDB_ADDRESS * pStart, CORDB_ADDRESS * pEnd);
COM_METHOD GetContext(ICorDebugContext ** ppContext);
COM_METHOD GetCaller(ICorDebugChain ** ppChain);
COM_METHOD GetCallee(ICorDebugChain ** ppChain);
COM_METHOD GetPrevious(ICorDebugChain ** ppChain);
COM_METHOD GetNext(ICorDebugChain ** ppChain);
COM_METHOD IsManaged(BOOL * pManaged);
COM_METHOD EnumerateFrames(ICorDebugFrameEnum ** ppFrames);
COM_METHOD GetActiveFrame(ICorDebugFrame ** ppFrame);
COM_METHOD GetRegisterSet(ICorDebugRegisterSet ** ppRegisters);
COM_METHOD GetReason(CorDebugChainReason * pReason);
//
// accessors
//
// Get the owning ShimStackWalk.
ShimStackWalk * GetShimStackWalk();
        // Get the first and last index of the frames owned by this chain. This class itself doesn't store the
// frames. Rather, the frames are stored on the ShimStackWalk. This class just stores the indices.
// Note that the indices are [firstIndex, lastIndex), i.e. the last index is exclusive.
UINT32 GetFirstFrameIndex();
UINT32 GetLastFrameIndex();
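        // For example, under the half-open convention above, a chain owning frames 3, 4, and 5 of the
        // ShimStackWalk's frame array would report GetFirstFrameIndex() == 3 and GetLastFrameIndex() == 6.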
private:
        // A chain describes a contiguous range of the stack. This includes a CONTEXT at the start (leafmost)
        // end of the chain, and a frame pointer where the chain ends (rootmost). This stack range is exposed
        // publicly via ICDChain::GetStackRange(), and can be used to stitch managed and native stack frames
        // together into a unified stack (see the usage sketch after this class).
DT_CONTEXT m_context; // the leaf end of the chain
FramePointer m_fpRoot; // the root end of the chain
ShimStackWalk * m_pStackWalk; // the owning ShimStackWalk
Volatile<ULONG> m_refCount;
// The 0-based index of this chain in the ShimStackWalk's chain array (m_pStackWalk->m_stackChains).
UINT32 m_chainIndex;
// The 0-based index of the first frame owned by this chain in the ShimStackWalk's frame array
        // (m_pStackWalk->m_stackFrames). See code:ShimChain::GetFirstFrameIndex().
UINT32 m_frameStartIndex;
// The 0-based index of the last frame owned by this chain in the ShimStackWalk's frame array
        // (m_pStackWalk->m_stackFrames). This index is exclusive. See code:ShimChain::GetLastFrameIndex().
UINT32 m_frameEndIndex;
CorDebugChainReason m_chainReason;
BOOL m_fIsManaged; // indicates whether this chain contains managed frames
BOOL m_fIsNeutered;
RSLock * m_pShimLock; // shim lock from ShimProcess to protect neuteredness checks
};
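// A minimal sketch of how a client might consume the stack range that ShimChain exposes
// (illustrative only; pChain is assumed to be an ICorDebugChain * already obtained from the shim):
//
//     CORDB_ADDRESS start = 0, end = 0;
//     if (SUCCEEDED(pChain->GetStackRange(&start, &end)))
//     {
//         // start and end bound this chain's portion of the stack; a debugger can interleave
//         // native frames around that range to stitch one unified call stack.
//     }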
//---------------------------------------------------------------------------------------
//
// This class implements the deprecated ICDChainEnum interface.
//
class ShimChainEnum : public ICorDebugChainEnum
{
public:
ShimChainEnum(ShimStackWalk * pSW, RSLock * pShimLock);
virtual ~ShimChainEnum();
void Neuter();
BOOL IsNeutered();
//
// IUnknown
//
ULONG STDMETHODCALLTYPE AddRef();
ULONG STDMETHODCALLTYPE Release();
COM_METHOD QueryInterface(REFIID riid, void ** ppInterface);
//
// ICorDebugEnum
//
COM_METHOD Skip(ULONG celt);
COM_METHOD Reset();
COM_METHOD Clone(ICorDebugEnum ** ppEnum);
COM_METHOD GetCount(ULONG * pcChains);
//
// ICorDebugChainEnum
//
COM_METHOD Next(ULONG cChains, ICorDebugChain * rgpChains[], ULONG * pcChainsFetched);
//
// accessors
//
// used to link ShimChainEnums in a list
ShimChainEnum * GetNext();
void SetNext(ShimChainEnum * pNext);
private:
ShimStackWalk * m_pStackWalk; // the owning ShimStackWalk
// This points to the next ShimChainEnum in the linked list of ShimChainEnums to be cleaned up.
// The head of the list is on the ShimStackWalk (m_pStackWalk->m_pChainEnumList).
ShimChainEnum * m_pNext;
UINT32 m_currentChainIndex; // the index of the current ShimChain being enumerated
Volatile<ULONG> m_refCount;
BOOL m_fIsNeutered;
RSLock * m_pShimLock; // shim lock from ShimProcess to protect neuteredness checks
};
//---------------------------------------------------------------------------------------
//
// This class implements the deprecated ICDFrameEnum interface.
//
class ShimFrameEnum : public ICorDebugFrameEnum
{
public:
ShimFrameEnum(ShimStackWalk * pSW, ShimChain * pChain, UINT32 frameStartIndex, UINT32 frameEndIndex, RSLock * pShimLock);
virtual ~ShimFrameEnum();
void Neuter();
BOOL IsNeutered();
//
// IUnknown
//
ULONG STDMETHODCALLTYPE AddRef();
ULONG STDMETHODCALLTYPE Release();
COM_METHOD QueryInterface(REFIID riid, void ** ppInterface);
//
// ICorDebugEnum
//
COM_METHOD Skip(ULONG celt);
COM_METHOD Reset();
COM_METHOD Clone(ICorDebugEnum ** ppEnum);
COM_METHOD GetCount(ULONG * pcFrames);
//
// ICorDebugFrameEnum
//
COM_METHOD Next(ULONG cFrames, ICorDebugFrame * rgpFrames[], ULONG * pcFramesFetched);
//
// accessors
//
        // used to link ShimFrameEnums in a list
ShimFrameEnum * GetNext();
void SetNext(ShimFrameEnum * pNext);
private:
ShimStackWalk * m_pStackWalk; // the owning ShimStackWalk
ShimChain * m_pChain; // the owning ShimChain
RSLock * m_pShimLock; // shim lock from ShimProcess to protect neuteredness checks
// This points to the next ShimFrameEnum in the linked list of ShimFrameEnums to be cleaned up.
// The head of the list is on the ShimStackWalk (m_pStackWalk->m_pFrameEnumList).
ShimFrameEnum * m_pNext;
UINT32 m_currentFrameIndex; // the current ICDFrame being enumerated
UINT32 m_endFrameIndex; // the last index (exclusive) of the frame owned by the chain;
// see code:ShimChain::GetLastFrameIndex
Volatile<ULONG> m_refCount;
BOOL m_fIsNeutered;
};
#endif // SHIMPRIV_H
| -1 |
dotnet/runtime | 66,245 | JIT: Optimize movzx after setcc | Clear target reg via `xor reg, reg` instead of relying on zero-extend after SETCC which is heavier and slower
```csharp
bool Test(int x) => x == 42;
```
```diff
; Method Test(int):bool:this
G_M55728_IG01: ;; offset=0000H
G_M55728_IG02: ;; offset=0000H
+ 33C0 xor eax, eax
83FA2A cmp edx, 42
0F94C0 sete al
- 0FB6C0 movzx rax, al
G_M55728_IG03: ;; offset=0009H
C3 ret
-; Total bytes of code: 10
+; Total bytes of code: 9
```
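Presumably the `xor eax, eax` has to be emitted ahead of the `cmp` because it clobbers the flags; `sete` then writes only `al`, and since the upper bits of the register were already cleared, the dependent `movzx` becomes unnecessary. Any compare that materializes a `bool` through SETCC should hit the same peephole, e.g. (hypothetical example, not taken from the PR):
```csharp
static bool IsEven(int x) => (x & 1) == 0;
```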
Nice diffs:
```
benchmarks.run.Linux.x64.checked.mch:
Total bytes of delta: -5230 (-0.03 % of base)
coreclr_tests.pmi.Linux.x64.checked.mch:
Total bytes of delta: -210860 (-0.16 % of base)
libraries.crossgen2.Linux.x64.checked.mch:
Total bytes of delta: -4420 (-0.06 % of base)
libraries.pmi.Linux.x64.checked.mch:
Total bytes of delta: -25312 (-0.05 % of base)
libraries_tests.pmi.Linux.x64.checked.mch:
Total bytes of delta: -49314 (-0.04 % of base)
``` | EgorBo | 2022-03-05T17:18:25Z | 2022-03-07T23:22:14Z | 440dfe4a7beecd7755767aa247f47af00b119383 | 5635905f134a3329a15112bd4975acef3f661eb2 | JIT: Optimize movzx after setcc. Clear target reg via `xor reg, reg` instead of relying on zero-extend after SETCC which is heavier and slower
```csharp
bool Test(int x) => x == 42;
```
```diff
; Method Test(int):bool:this
G_M55728_IG01: ;; offset=0000H
G_M55728_IG02: ;; offset=0000H
+ 33C0 xor eax, eax
83FA2A cmp edx, 42
0F94C0 sete al
- 0FB6C0 movzx rax, al
G_M55728_IG03: ;; offset=0009H
C3 ret
-; Total bytes of code: 10
+; Total bytes of code: 9
```
Nice diffs:
```
benchmarks.run.Linux.x64.checked.mch:
Total bytes of delta: -5230 (-0.03 % of base)
coreclr_tests.pmi.Linux.x64.checked.mch:
Total bytes of delta: -210860 (-0.16 % of base)
libraries.crossgen2.Linux.x64.checked.mch:
Total bytes of delta: -4420 (-0.06 % of base)
libraries.pmi.Linux.x64.checked.mch:
Total bytes of delta: -25312 (-0.05 % of base)
libraries_tests.pmi.Linux.x64.checked.mch:
Total bytes of delta: -49314 (-0.04 % of base)
``` | ./src/coreclr/nativeaot/Runtime/windows/CoffNativeCodeManager.h | // Licensed to the .NET Foundation under one or more agreements.
// The .NET Foundation licenses this file to you under the MIT license.
#pragma once
#if defined(TARGET_AMD64) || defined(TARGET_X86)
struct T_RUNTIME_FUNCTION {
uint32_t BeginAddress;
uint32_t EndAddress;
uint32_t UnwindInfoAddress;
};
#elif defined(TARGET_ARM)
struct T_RUNTIME_FUNCTION {
uint32_t BeginAddress;
uint32_t UnwindData;
};
#elif defined(TARGET_ARM64)
struct T_RUNTIME_FUNCTION {
uint32_t BeginAddress;
union {
uint32_t UnwindData;
struct {
uint32_t Flag : 2;
uint32_t FunctionLength : 11;
uint32_t RegF : 3;
uint32_t RegI : 4;
uint32_t H : 1;
uint32_t CR : 2;
uint32_t FrameSize : 9;
} PackedUnwindData;
};
};
#else
#error unexpected target architecture
#endif
typedef DPTR(T_RUNTIME_FUNCTION) PTR_RUNTIME_FUNCTION;
class CoffNativeCodeManager : public ICodeManager
{
TADDR m_moduleBase;
PTR_VOID m_pvManagedCodeStartRange;
uint32_t m_cbManagedCodeRange;
PTR_RUNTIME_FUNCTION m_pRuntimeFunctionTable;
uint32_t m_nRuntimeFunctionTable;
PTR_PTR_VOID m_pClasslibFunctions;
uint32_t m_nClasslibFunctions;
public:
CoffNativeCodeManager(TADDR moduleBase,
PTR_VOID pvManagedCodeStartRange, uint32_t cbManagedCodeRange,
PTR_RUNTIME_FUNCTION pRuntimeFunctionTable, uint32_t nRuntimeFunctionTable,
PTR_PTR_VOID pClasslibFunctions, uint32_t nClasslibFunctions);
~CoffNativeCodeManager();
//
// Code manager methods
//
bool FindMethodInfo(PTR_VOID ControlPC,
MethodInfo * pMethodInfoOut);
bool IsFunclet(MethodInfo * pMethodInfo);
bool IsFilter(MethodInfo * pMethodInfo);
PTR_VOID GetFramePointer(MethodInfo * pMethodInfo,
REGDISPLAY * pRegisterSet);
void EnumGcRefs(MethodInfo * pMethodInfo,
PTR_VOID safePointAddress,
REGDISPLAY * pRegisterSet,
GCEnumContext * hCallback);
bool UnwindStackFrame(MethodInfo * pMethodInfo,
REGDISPLAY * pRegisterSet, // in/out
PTR_VOID * ppPreviousTransitionFrame); // out
uintptr_t GetConservativeUpperBoundForOutgoingArgs(MethodInfo * pMethodInfo,
REGDISPLAY * pRegisterSet);
bool GetReturnAddressHijackInfo(MethodInfo * pMethodInfo,
REGDISPLAY * pRegisterSet, // in
PTR_PTR_VOID * ppvRetAddrLocation, // out
GCRefKind * pRetValueKind); // out
void UnsynchronizedHijackMethodLoops(MethodInfo * pMethodInfo);
PTR_VOID RemapHardwareFaultToGCSafePoint(MethodInfo * pMethodInfo, PTR_VOID controlPC);
bool EHEnumInit(MethodInfo * pMethodInfo, PTR_VOID * pMethodStartAddress, EHEnumState * pEHEnumState);
bool EHEnumNext(EHEnumState * pEHEnumState, EHClause * pEHClause);
PTR_VOID GetMethodStartAddress(MethodInfo * pMethodInfo);
void * GetClasslibFunction(ClasslibFunctionId functionId);
PTR_VOID GetAssociatedData(PTR_VOID ControlPC);
PTR_VOID GetOsModuleHandle();
};
| // Licensed to the .NET Foundation under one or more agreements.
// The .NET Foundation licenses this file to you under the MIT license.
#pragma once
#if defined(TARGET_AMD64) || defined(TARGET_X86)
struct T_RUNTIME_FUNCTION {
uint32_t BeginAddress;
uint32_t EndAddress;
uint32_t UnwindInfoAddress;
};
#elif defined(TARGET_ARM)
struct T_RUNTIME_FUNCTION {
uint32_t BeginAddress;
uint32_t UnwindData;
};
#elif defined(TARGET_ARM64)
struct T_RUNTIME_FUNCTION {
uint32_t BeginAddress;
union {
uint32_t UnwindData;
struct {
uint32_t Flag : 2;
uint32_t FunctionLength : 11;
uint32_t RegF : 3;
uint32_t RegI : 4;
uint32_t H : 1;
uint32_t CR : 2;
uint32_t FrameSize : 9;
} PackedUnwindData;
};
};
#else
#error unexpected target architecture
#endif
typedef DPTR(T_RUNTIME_FUNCTION) PTR_RUNTIME_FUNCTION;
class CoffNativeCodeManager : public ICodeManager
{
TADDR m_moduleBase;
PTR_VOID m_pvManagedCodeStartRange;
uint32_t m_cbManagedCodeRange;
PTR_RUNTIME_FUNCTION m_pRuntimeFunctionTable;
uint32_t m_nRuntimeFunctionTable;
PTR_PTR_VOID m_pClasslibFunctions;
uint32_t m_nClasslibFunctions;
public:
CoffNativeCodeManager(TADDR moduleBase,
PTR_VOID pvManagedCodeStartRange, uint32_t cbManagedCodeRange,
PTR_RUNTIME_FUNCTION pRuntimeFunctionTable, uint32_t nRuntimeFunctionTable,
PTR_PTR_VOID pClasslibFunctions, uint32_t nClasslibFunctions);
~CoffNativeCodeManager();
//
// Code manager methods
//
bool FindMethodInfo(PTR_VOID ControlPC,
MethodInfo * pMethodInfoOut);
bool IsFunclet(MethodInfo * pMethodInfo);
bool IsFilter(MethodInfo * pMethodInfo);
PTR_VOID GetFramePointer(MethodInfo * pMethodInfo,
REGDISPLAY * pRegisterSet);
void EnumGcRefs(MethodInfo * pMethodInfo,
PTR_VOID safePointAddress,
REGDISPLAY * pRegisterSet,
GCEnumContext * hCallback);
bool UnwindStackFrame(MethodInfo * pMethodInfo,
REGDISPLAY * pRegisterSet, // in/out
PTR_VOID * ppPreviousTransitionFrame); // out
uintptr_t GetConservativeUpperBoundForOutgoingArgs(MethodInfo * pMethodInfo,
REGDISPLAY * pRegisterSet);
bool GetReturnAddressHijackInfo(MethodInfo * pMethodInfo,
REGDISPLAY * pRegisterSet, // in
PTR_PTR_VOID * ppvRetAddrLocation, // out
GCRefKind * pRetValueKind); // out
void UnsynchronizedHijackMethodLoops(MethodInfo * pMethodInfo);
PTR_VOID RemapHardwareFaultToGCSafePoint(MethodInfo * pMethodInfo, PTR_VOID controlPC);
bool EHEnumInit(MethodInfo * pMethodInfo, PTR_VOID * pMethodStartAddress, EHEnumState * pEHEnumState);
bool EHEnumNext(EHEnumState * pEHEnumState, EHClause * pEHClause);
PTR_VOID GetMethodStartAddress(MethodInfo * pMethodInfo);
void * GetClasslibFunction(ClasslibFunctionId functionId);
PTR_VOID GetAssociatedData(PTR_VOID ControlPC);
PTR_VOID GetOsModuleHandle();
};
| -1 |
dotnet/runtime | 66,245 | JIT: Optimize movzx after setcc | Clear target reg via `xor reg, reg` instead of relying on zero-extend after SETCC which is heavier and slower
```csharp
bool Test(int x) => x == 42;
```
```diff
; Method Test(int):bool:this
G_M55728_IG01: ;; offset=0000H
G_M55728_IG02: ;; offset=0000H
+ 33C0 xor eax, eax
83FA2A cmp edx, 42
0F94C0 sete al
- 0FB6C0 movzx rax, al
G_M55728_IG03: ;; offset=0009H
C3 ret
-; Total bytes of code: 10
+; Total bytes of code: 9
```
Nice diffs:
```
benchmarks.run.Linux.x64.checked.mch:
Total bytes of delta: -5230 (-0.03 % of base)
coreclr_tests.pmi.Linux.x64.checked.mch:
Total bytes of delta: -210860 (-0.16 % of base)
libraries.crossgen2.Linux.x64.checked.mch:
Total bytes of delta: -4420 (-0.06 % of base)
libraries.pmi.Linux.x64.checked.mch:
Total bytes of delta: -25312 (-0.05 % of base)
libraries_tests.pmi.Linux.x64.checked.mch:
Total bytes of delta: -49314 (-0.04 % of base)
``` | EgorBo | 2022-03-05T17:18:25Z | 2022-03-07T23:22:14Z | 440dfe4a7beecd7755767aa247f47af00b119383 | 5635905f134a3329a15112bd4975acef3f661eb2 | JIT: Optimize movzx after setcc. Clear target reg via `xor reg, reg` instead of relying on zero-extend after SETCC which is heavier and slower
```csharp
bool Test(int x) => x == 42;
```
```diff
; Method Test(int):bool:this
G_M55728_IG01: ;; offset=0000H
G_M55728_IG02: ;; offset=0000H
+ 33C0 xor eax, eax
83FA2A cmp edx, 42
0F94C0 sete al
- 0FB6C0 movzx rax, al
G_M55728_IG03: ;; offset=0009H
C3 ret
-; Total bytes of code: 10
+; Total bytes of code: 9
```
Nice diffs:
```
benchmarks.run.Linux.x64.checked.mch:
Total bytes of delta: -5230 (-0.03 % of base)
coreclr_tests.pmi.Linux.x64.checked.mch:
Total bytes of delta: -210860 (-0.16 % of base)
libraries.crossgen2.Linux.x64.checked.mch:
Total bytes of delta: -4420 (-0.06 % of base)
libraries.pmi.Linux.x64.checked.mch:
Total bytes of delta: -25312 (-0.05 % of base)
libraries_tests.pmi.Linux.x64.checked.mch:
Total bytes of delta: -49314 (-0.04 % of base)
``` | ./src/coreclr/pal/tests/palsuite/locale_info/GetLocaleInfoW/test1/test1.cpp | // Licensed to the .NET Foundation under one or more agreements.
// The .NET Foundation licenses this file to you under the MIT license.
/*============================================================================
**
** Source: test1.c
**
** Purpose: Tests that GetLocaleInfoW gives the correct information for
** LOCALE_NEUTRAL.
**
**
**==========================================================================*/
#include <palsuite.h>
int Types[] = { LOCALE_SDECIMAL, LOCALE_STHOUSAND, LOCALE_ILZERO,
LOCALE_SCURRENCY, LOCALE_SMONDECIMALSEP, LOCALE_SMONTHOUSANDSEP };
char *TypeStrings[] = { "LOCALE_SDECIMAL", "LOCALE_STHOUSAND", "LOCALE_ILZERO",
"LOCALE_SCURRENCY", "LOCALE_SMONDECIMALSEP", "LOCALE_SMONTHOUSANDSEP" };
typedef WCHAR InfoStrings[ARRAY_SIZE(Types)][4];
typedef struct
{
LCID lcid;
InfoStrings Strings;
} LocalInfoType;
LocalInfoType Locales[] =
{
{LOCALE_NEUTRAL,
{{'.',0}, {',',0}, {'1',0}, {'$',0}, {'.',0}, {',',0}}},
};
int NumLocales = sizeof(Locales) / sizeof(Locales[0]);
PALTEST(locale_info_GetLocaleInfoW_test1_paltest_getlocaleinfow_test1, "locale_info/GetLocaleInfoW/test1/paltest_getlocaleinfow_test1")
{
WCHAR buffer[256] = { 0 };
int ret;
int i,j;
if (PAL_Initialize(argc, argv))
{
return FAIL;
}
for (i=0; i<NumLocales; i++)
{
for (j=0; j < ARRAY_SIZE(Types); j++)
{
ret = GetLocaleInfoW(Locales[i].lcid, Types[j], buffer, 256);
if (ret == 0)
{
Fail("GetLocaleInfoW returned an unexpected error!\n");
}
if (wcscmp(buffer, Locales[i].Strings[j]) != 0)
{
Fail("GetLocaleInfoW gave incorrect result for %s, "
"locale %#x:\nExpected \"%S\", got \"%S\"!\n", TypeStrings[j],
Locales[i].lcid, Locales[i].Strings[j], buffer);
}
if (ret != wcslen(Locales[i].Strings[j]) + 1)
{
Fail("GetLocaleInfoW returned incorrect value for %s, "
"locale %#x:\nExpected %d, got %d!\n", TypeStrings[j],
Locales[i].lcid, wcslen(Locales[i].Strings[j])+1, ret);
}
}
}
PAL_Terminate();
return PASS;
}
| // Licensed to the .NET Foundation under one or more agreements.
// The .NET Foundation licenses this file to you under the MIT license.
/*============================================================================
**
** Source: test1.c
**
** Purpose: Tests that GetLocaleInfoW gives the correct information for
** LOCALE_NEUTRAL.
**
**
**==========================================================================*/
#include <palsuite.h>
int Types[] = { LOCALE_SDECIMAL, LOCALE_STHOUSAND, LOCALE_ILZERO,
LOCALE_SCURRENCY, LOCALE_SMONDECIMALSEP, LOCALE_SMONTHOUSANDSEP };
char *TypeStrings[] = { "LOCALE_SDECIMAL", "LOCALE_STHOUSAND", "LOCALE_ILZERO",
"LOCALE_SCURRENCY", "LOCALE_SMONDECIMALSEP", "LOCALE_SMONTHOUSANDSEP" };
typedef WCHAR InfoStrings[ARRAY_SIZE(Types)][4];
typedef struct
{
LCID lcid;
InfoStrings Strings;
} LocalInfoType;
LocalInfoType Locales[] =
{
{LOCALE_NEUTRAL,
{{'.',0}, {',',0}, {'1',0}, {'$',0}, {'.',0}, {',',0}}},
};
int NumLocales = sizeof(Locales) / sizeof(Locales[0]);
PALTEST(locale_info_GetLocaleInfoW_test1_paltest_getlocaleinfow_test1, "locale_info/GetLocaleInfoW/test1/paltest_getlocaleinfow_test1")
{
WCHAR buffer[256] = { 0 };
int ret;
int i,j;
if (PAL_Initialize(argc, argv))
{
return FAIL;
}
for (i=0; i<NumLocales; i++)
{
for (j=0; j < ARRAY_SIZE(Types); j++)
{
ret = GetLocaleInfoW(Locales[i].lcid, Types[j], buffer, 256);
if (ret == 0)
{
Fail("GetLocaleInfoW returned an unexpected error!\n");
}
if (wcscmp(buffer, Locales[i].Strings[j]) != 0)
{
Fail("GetLocaleInfoW gave incorrect result for %s, "
"locale %#x:\nExpected \"%S\", got \"%S\"!\n", TypeStrings[j],
Locales[i].lcid, Locales[i].Strings[j], buffer);
}
if (ret != wcslen(Locales[i].Strings[j]) + 1)
{
Fail("GetLocaleInfoW returned incorrect value for %s, "
"locale %#x:\nExpected %d, got %d!\n", TypeStrings[j],
Locales[i].lcid, wcslen(Locales[i].Strings[j])+1, ret);
}
}
}
PAL_Terminate();
return PASS;
}
| -1 |
dotnet/runtime | 66,245 | JIT: Optimize movzx after setcc | Clear target reg via `xor reg, reg` instead of relying on zero-extend after SETCC which is heavier and slower
```csharp
bool Test(int x) => x == 42;
```
```diff
; Method Test(int):bool:this
G_M55728_IG01: ;; offset=0000H
G_M55728_IG02: ;; offset=0000H
+ 33C0 xor eax, eax
83FA2A cmp edx, 42
0F94C0 sete al
- 0FB6C0 movzx rax, al
G_M55728_IG03: ;; offset=0009H
C3 ret
-; Total bytes of code: 10
+; Total bytes of code: 9
```
Nice diffs:
```
benchmarks.run.Linux.x64.checked.mch:
Total bytes of delta: -5230 (-0.03 % of base)
coreclr_tests.pmi.Linux.x64.checked.mch:
Total bytes of delta: -210860 (-0.16 % of base)
libraries.crossgen2.Linux.x64.checked.mch:
Total bytes of delta: -4420 (-0.06 % of base)
libraries.pmi.Linux.x64.checked.mch:
Total bytes of delta: -25312 (-0.05 % of base)
libraries_tests.pmi.Linux.x64.checked.mch:
Total bytes of delta: -49314 (-0.04 % of base)
``` | EgorBo | 2022-03-05T17:18:25Z | 2022-03-07T23:22:14Z | 440dfe4a7beecd7755767aa247f47af00b119383 | 5635905f134a3329a15112bd4975acef3f661eb2 | JIT: Optimize movzx after setcc. Clear target reg via `xor reg, reg` instead of relying on zero-extend after SETCC which is heavier and slower
```csharp
bool Test(int x) => x == 42;
```
```diff
; Method Test(int):bool:this
G_M55728_IG01: ;; offset=0000H
G_M55728_IG02: ;; offset=0000H
+ 33C0 xor eax, eax
83FA2A cmp edx, 42
0F94C0 sete al
- 0FB6C0 movzx rax, al
G_M55728_IG03: ;; offset=0009H
C3 ret
-; Total bytes of code: 10
+; Total bytes of code: 9
```
Nice diffs:
```
benchmarks.run.Linux.x64.checked.mch:
Total bytes of delta: -5230 (-0.03 % of base)
coreclr_tests.pmi.Linux.x64.checked.mch:
Total bytes of delta: -210860 (-0.16 % of base)
libraries.crossgen2.Linux.x64.checked.mch:
Total bytes of delta: -4420 (-0.06 % of base)
libraries.pmi.Linux.x64.checked.mch:
Total bytes of delta: -25312 (-0.05 % of base)
libraries_tests.pmi.Linux.x64.checked.mch:
Total bytes of delta: -49314 (-0.04 % of base)
``` | ./src/native/external/rapidjson/memorybuffer.h | // Tencent is pleased to support the open source community by making RapidJSON available.
//
// Copyright (C) 2015 THL A29 Limited, a Tencent company, and Milo Yip. All rights reserved.
//
// Licensed under the MIT License (the "License"); you may not use this file except
// in compliance with the License. You may obtain a copy of the License at
//
// http://opensource.org/licenses/MIT
//
// Unless required by applicable law or agreed to in writing, software distributed
// under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR
// CONDITIONS OF ANY KIND, either express or implied. See the License for the
// specific language governing permissions and limitations under the License.
#ifndef RAPIDJSON_MEMORYBUFFER_H_
#define RAPIDJSON_MEMORYBUFFER_H_
#include "stream.h"
#include "internal/stack.h"
RAPIDJSON_NAMESPACE_BEGIN
//! Represents an in-memory output byte stream.
/*!
This class is mainly for being wrapped by EncodedOutputStream or AutoUTFOutputStream.
It is similar to FileWriteBuffer but the destination is an in-memory buffer instead of a file.
Differences between MemoryBuffer and StringBuffer:
1. StringBuffer has Encoding but MemoryBuffer is only a byte buffer.
2. StringBuffer::GetString() returns a null-terminated string. MemoryBuffer::GetBuffer() returns a buffer without terminator.
\tparam Allocator type for allocating memory buffer.
\note implements Stream concept
*/
template <typename Allocator = CrtAllocator>
struct GenericMemoryBuffer {
typedef char Ch; // byte
GenericMemoryBuffer(Allocator* allocator = 0, size_t capacity = kDefaultCapacity) : stack_(allocator, capacity) {}
void Put(Ch c) { *stack_.template Push<Ch>() = c; }
void Flush() {}
void Clear() { stack_.Clear(); }
void ShrinkToFit() { stack_.ShrinkToFit(); }
Ch* Push(size_t count) { return stack_.template Push<Ch>(count); }
void Pop(size_t count) { stack_.template Pop<Ch>(count); }
const Ch* GetBuffer() const {
return stack_.template Bottom<Ch>();
}
size_t GetSize() const { return stack_.GetSize(); }
static const size_t kDefaultCapacity = 256;
mutable internal::Stack<Allocator> stack_;
};
typedef GenericMemoryBuffer<> MemoryBuffer;
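//! A minimal usage sketch (illustrative only; assumes rapidjson/writer.h is also included).
/*!
\code
MemoryBuffer mb;
Writer<MemoryBuffer> writer(mb);
writer.StartObject();
writer.Key("answer");
writer.Int(42);
writer.EndObject();
// mb.GetBuffer() / mb.GetSize() now give the raw JSON bytes (no null terminator).
\endcode
*/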
//! Implement specialized version of PutN() with memset() for better performance.
template<>
inline void PutN(MemoryBuffer& memoryBuffer, char c, size_t n) {
std::memset(memoryBuffer.stack_.Push<char>(n), c, n * sizeof(c));
}
RAPIDJSON_NAMESPACE_END
#endif // RAPIDJSON_MEMORYBUFFER_H_
| // Tencent is pleased to support the open source community by making RapidJSON available.
//
// Copyright (C) 2015 THL A29 Limited, a Tencent company, and Milo Yip. All rights reserved.
//
// Licensed under the MIT License (the "License"); you may not use this file except
// in compliance with the License. You may obtain a copy of the License at
//
// http://opensource.org/licenses/MIT
//
// Unless required by applicable law or agreed to in writing, software distributed
// under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR
// CONDITIONS OF ANY KIND, either express or implied. See the License for the
// specific language governing permissions and limitations under the License.
#ifndef RAPIDJSON_MEMORYBUFFER_H_
#define RAPIDJSON_MEMORYBUFFER_H_
#include "stream.h"
#include "internal/stack.h"
RAPIDJSON_NAMESPACE_BEGIN
//! Represents an in-memory output byte stream.
/*!
This class is mainly for being wrapped by EncodedOutputStream or AutoUTFOutputStream.
It is similar to FileWriteBuffer but the destination is an in-memory buffer instead of a file.
Differences between MemoryBuffer and StringBuffer:
1. StringBuffer has Encoding but MemoryBuffer is only a byte buffer.
2. StringBuffer::GetString() returns a null-terminated string. MemoryBuffer::GetBuffer() returns a buffer without terminator.
\tparam Allocator type for allocating memory buffer.
\note implements Stream concept
*/
template <typename Allocator = CrtAllocator>
struct GenericMemoryBuffer {
typedef char Ch; // byte
GenericMemoryBuffer(Allocator* allocator = 0, size_t capacity = kDefaultCapacity) : stack_(allocator, capacity) {}
void Put(Ch c) { *stack_.template Push<Ch>() = c; }
void Flush() {}
void Clear() { stack_.Clear(); }
void ShrinkToFit() { stack_.ShrinkToFit(); }
Ch* Push(size_t count) { return stack_.template Push<Ch>(count); }
void Pop(size_t count) { stack_.template Pop<Ch>(count); }
const Ch* GetBuffer() const {
return stack_.template Bottom<Ch>();
}
size_t GetSize() const { return stack_.GetSize(); }
static const size_t kDefaultCapacity = 256;
mutable internal::Stack<Allocator> stack_;
};
typedef GenericMemoryBuffer<> MemoryBuffer;
//! Implement specialized version of PutN() with memset() for better performance.
template<>
inline void PutN(MemoryBuffer& memoryBuffer, char c, size_t n) {
std::memset(memoryBuffer.stack_.Push<char>(n), c, n * sizeof(c));
}
RAPIDJSON_NAMESPACE_END
#endif // RAPIDJSON_MEMORYBUFFER_H_
| -1 |
dotnet/runtime | 66,245 | JIT: Optimize movzx after setcc | Clear target reg via `xor reg, reg` instead of relying on zero-extend after SETCC which is heavier and slower
```csharp
bool Test(int x) => x == 42;
```
```diff
; Method Test(int):bool:this
G_M55728_IG01: ;; offset=0000H
G_M55728_IG02: ;; offset=0000H
+ 33C0 xor eax, eax
83FA2A cmp edx, 42
0F94C0 sete al
- 0FB6C0 movzx rax, al
G_M55728_IG03: ;; offset=0009H
C3 ret
-; Total bytes of code: 10
+; Total bytes of code: 9
```
Nice diffs:
```
benchmarks.run.Linux.x64.checked.mch:
Total bytes of delta: -5230 (-0.03 % of base)
coreclr_tests.pmi.Linux.x64.checked.mch:
Total bytes of delta: -210860 (-0.16 % of base)
libraries.crossgen2.Linux.x64.checked.mch:
Total bytes of delta: -4420 (-0.06 % of base)
libraries.pmi.Linux.x64.checked.mch:
Total bytes of delta: -25312 (-0.05 % of base)
libraries_tests.pmi.Linux.x64.checked.mch:
Total bytes of delta: -49314 (-0.04 % of base)
``` | EgorBo | 2022-03-05T17:18:25Z | 2022-03-07T23:22:14Z | 440dfe4a7beecd7755767aa247f47af00b119383 | 5635905f134a3329a15112bd4975acef3f661eb2 | JIT: Optimize movzx after setcc. Clear target reg via `xor reg, reg` instead of relying on zero-extend after SETCC which is heavier and slower
```csharp
bool Test(int x) => x == 42;
```
```diff
; Method Test(int):bool:this
G_M55728_IG01: ;; offset=0000H
G_M55728_IG02: ;; offset=0000H
+ 33C0 xor eax, eax
83FA2A cmp edx, 42
0F94C0 sete al
- 0FB6C0 movzx rax, al
G_M55728_IG03: ;; offset=0009H
C3 ret
-; Total bytes of code: 10
+; Total bytes of code: 9
```
Nice diffs:
```
benchmarks.run.Linux.x64.checked.mch:
Total bytes of delta: -5230 (-0.03 % of base)
coreclr_tests.pmi.Linux.x64.checked.mch:
Total bytes of delta: -210860 (-0.16 % of base)
libraries.crossgen2.Linux.x64.checked.mch:
Total bytes of delta: -4420 (-0.06 % of base)
libraries.pmi.Linux.x64.checked.mch:
Total bytes of delta: -25312 (-0.05 % of base)
libraries_tests.pmi.Linux.x64.checked.mch:
Total bytes of delta: -49314 (-0.04 % of base)
``` | ./src/libraries/System.Configuration.ConfigurationManager/src/System/Configuration/ConfigurationValues.cs | // Licensed to the .NET Foundation under one or more agreements.
// The .NET Foundation licenses this file to you under the MIT license.
using System.Collections;
using System.Collections.Specialized;
namespace System.Configuration
{
internal sealed class ConfigurationValues : NameObjectCollectionBase
{
private static volatile IEnumerable s_emptyCollection;
private BaseConfigurationRecord _configRecord;
private volatile bool _containsElement;
private volatile bool _containsInvalidValue;
internal ConfigurationValues() : base(StringComparer.Ordinal) { }
internal object this[string key]
{
get
{
ConfigurationValue configValue = GetConfigValue(key);
return configValue?.Value;
}
set { SetValue(key, value, ConfigurationValueFlags.Modified, null); }
}
internal object this[int index]
{
get
{
ConfigurationValue configValue = GetConfigValue(index);
return configValue?.Value;
}
}
internal object SyncRoot => this;
internal IEnumerable ConfigurationElements
=> _containsElement ? new ConfigurationElementsCollection(this) : EmptyCollectionInstance;
internal IEnumerable InvalidValues
=> _containsInvalidValue ? new InvalidValuesCollection(this) : EmptyCollectionInstance;
private static IEnumerable EmptyCollectionInstance
=> s_emptyCollection ?? (s_emptyCollection = new EmptyCollection());
internal void AssociateContext(BaseConfigurationRecord configRecord)
{
_configRecord = configRecord;
// Associate with children
foreach (ConfigurationElement currentElement in ConfigurationElements)
currentElement.AssociateContext(_configRecord);
}
internal bool Contains(string key)
{
return BaseGet(key) != null;
}
internal string GetKey(int index)
{
return BaseGetKey(index);
}
internal ConfigurationValue GetConfigValue(string key)
{
return (ConfigurationValue)BaseGet(key);
}
internal ConfigurationValue GetConfigValue(int index)
{
return (ConfigurationValue)BaseGet(index);
}
internal PropertySourceInfo GetSourceInfo(string key)
{
ConfigurationValue configurationValue = GetConfigValue(key);
return configurationValue?.SourceInfo;
}
internal void ChangeSourceInfo(string key, PropertySourceInfo sourceInfo)
{
ConfigurationValue configurationValue = GetConfigValue(key);
if (configurationValue != null) configurationValue.SourceInfo = sourceInfo;
}
private ConfigurationValue CreateConfigValue(object value, ConfigurationValueFlags valueFlags,
PropertySourceInfo sourceInfo)
{
if (value != null)
{
if (value is ConfigurationElement)
{
_containsElement = true;
((ConfigurationElement)value).AssociateContext(_configRecord);
}
else
{
if (value is InvalidPropValue) _containsInvalidValue = true;
}
}
ConfigurationValue configValue = new ConfigurationValue(value, valueFlags, sourceInfo);
return configValue;
}
internal void SetValue(string key, object value, ConfigurationValueFlags valueFlags,
PropertySourceInfo sourceInfo)
{
ConfigurationValue configValue = CreateConfigValue(value, valueFlags, sourceInfo);
BaseSet(key, configValue);
}
internal void Clear()
{
BaseClear();
}
internal ConfigurationValueFlags RetrieveFlags(string key)
{
ConfigurationValue configurationValue = (ConfigurationValue)BaseGet(key);
return configurationValue?.ValueFlags ?? ConfigurationValueFlags.Default;
}
internal bool IsModified(string key)
{
ConfigurationValue configurationValue = (ConfigurationValue)BaseGet(key);
if (configurationValue != null)
return (configurationValue.ValueFlags & ConfigurationValueFlags.Modified) != 0;
return false;
}
internal bool IsInherited(string key)
{
ConfigurationValue configurationValue = (ConfigurationValue)BaseGet(key);
if (configurationValue != null)
return (configurationValue.ValueFlags & ConfigurationValueFlags.Inherited) != 0;
return false;
}
private sealed class EmptyCollection : IEnumerable
{
private readonly IEnumerator _emptyEnumerator;
internal EmptyCollection()
{
_emptyEnumerator = new EmptyCollectionEnumerator();
}
IEnumerator IEnumerable.GetEnumerator()
{
return _emptyEnumerator;
}
private sealed class EmptyCollectionEnumerator : IEnumerator
{
bool IEnumerator.MoveNext()
{
return false;
}
object IEnumerator.Current => null;
void IEnumerator.Reset() { }
}
}
private sealed class ConfigurationElementsCollection : IEnumerable
{
private readonly ConfigurationValues _values;
internal ConfigurationElementsCollection(ConfigurationValues values)
{
_values = values;
}
IEnumerator IEnumerable.GetEnumerator()
{
if (_values._containsElement)
{
for (int index = 0; index < _values.Count; index++)
{
object value = _values[index];
if (value is ConfigurationElement) yield return value;
}
}
}
}
private sealed class InvalidValuesCollection : IEnumerable
{
private readonly ConfigurationValues _values;
internal InvalidValuesCollection(ConfigurationValues values)
{
_values = values;
}
IEnumerator IEnumerable.GetEnumerator()
{
if (_values._containsInvalidValue)
{
for (int index = 0; index < _values.Count; index++)
{
object value = _values[index];
if (value is InvalidPropValue) yield return value;
}
}
}
}
}
}
| // Licensed to the .NET Foundation under one or more agreements.
// The .NET Foundation licenses this file to you under the MIT license.
using System.Collections;
using System.Collections.Specialized;
namespace System.Configuration
{
internal sealed class ConfigurationValues : NameObjectCollectionBase
{
private static volatile IEnumerable s_emptyCollection;
private BaseConfigurationRecord _configRecord;
private volatile bool _containsElement;
private volatile bool _containsInvalidValue;
internal ConfigurationValues() : base(StringComparer.Ordinal) { }
internal object this[string key]
{
get
{
ConfigurationValue configValue = GetConfigValue(key);
return configValue?.Value;
}
set { SetValue(key, value, ConfigurationValueFlags.Modified, null); }
}
internal object this[int index]
{
get
{
ConfigurationValue configValue = GetConfigValue(index);
return configValue?.Value;
}
}
internal object SyncRoot => this;
internal IEnumerable ConfigurationElements
=> _containsElement ? new ConfigurationElementsCollection(this) : EmptyCollectionInstance;
internal IEnumerable InvalidValues
=> _containsInvalidValue ? new InvalidValuesCollection(this) : EmptyCollectionInstance;
private static IEnumerable EmptyCollectionInstance
=> s_emptyCollection ?? (s_emptyCollection = new EmptyCollection());
internal void AssociateContext(BaseConfigurationRecord configRecord)
{
_configRecord = configRecord;
// Associate with children
foreach (ConfigurationElement currentElement in ConfigurationElements)
currentElement.AssociateContext(_configRecord);
}
internal bool Contains(string key)
{
return BaseGet(key) != null;
}
internal string GetKey(int index)
{
return BaseGetKey(index);
}
internal ConfigurationValue GetConfigValue(string key)
{
return (ConfigurationValue)BaseGet(key);
}
internal ConfigurationValue GetConfigValue(int index)
{
return (ConfigurationValue)BaseGet(index);
}
internal PropertySourceInfo GetSourceInfo(string key)
{
ConfigurationValue configurationValue = GetConfigValue(key);
return configurationValue?.SourceInfo;
}
internal void ChangeSourceInfo(string key, PropertySourceInfo sourceInfo)
{
ConfigurationValue configurationValue = GetConfigValue(key);
if (configurationValue != null) configurationValue.SourceInfo = sourceInfo;
}
private ConfigurationValue CreateConfigValue(object value, ConfigurationValueFlags valueFlags,
PropertySourceInfo sourceInfo)
{
if (value != null)
{
if (value is ConfigurationElement)
{
_containsElement = true;
((ConfigurationElement)value).AssociateContext(_configRecord);
}
else
{
if (value is InvalidPropValue) _containsInvalidValue = true;
}
}
ConfigurationValue configValue = new ConfigurationValue(value, valueFlags, sourceInfo);
return configValue;
}
internal void SetValue(string key, object value, ConfigurationValueFlags valueFlags,
PropertySourceInfo sourceInfo)
{
ConfigurationValue configValue = CreateConfigValue(value, valueFlags, sourceInfo);
BaseSet(key, configValue);
}
internal void Clear()
{
BaseClear();
}
internal ConfigurationValueFlags RetrieveFlags(string key)
{
ConfigurationValue configurationValue = (ConfigurationValue)BaseGet(key);
return configurationValue?.ValueFlags ?? ConfigurationValueFlags.Default;
}
internal bool IsModified(string key)
{
ConfigurationValue configurationValue = (ConfigurationValue)BaseGet(key);
if (configurationValue != null)
return (configurationValue.ValueFlags & ConfigurationValueFlags.Modified) != 0;
return false;
}
internal bool IsInherited(string key)
{
ConfigurationValue configurationValue = (ConfigurationValue)BaseGet(key);
if (configurationValue != null)
return (configurationValue.ValueFlags & ConfigurationValueFlags.Inherited) != 0;
return false;
}
private sealed class EmptyCollection : IEnumerable
{
private readonly IEnumerator _emptyEnumerator;
internal EmptyCollection()
{
_emptyEnumerator = new EmptyCollectionEnumerator();
}
IEnumerator IEnumerable.GetEnumerator()
{
return _emptyEnumerator;
}
private sealed class EmptyCollectionEnumerator : IEnumerator
{
bool IEnumerator.MoveNext()
{
return false;
}
object IEnumerator.Current => null;
void IEnumerator.Reset() { }
}
}
private sealed class ConfigurationElementsCollection : IEnumerable
{
private readonly ConfigurationValues _values;
internal ConfigurationElementsCollection(ConfigurationValues values)
{
_values = values;
}
IEnumerator IEnumerable.GetEnumerator()
{
if (_values._containsElement)
{
for (int index = 0; index < _values.Count; index++)
{
object value = _values[index];
if (value is ConfigurationElement) yield return value;
}
}
}
}
private sealed class InvalidValuesCollection : IEnumerable
{
private readonly ConfigurationValues _values;
internal InvalidValuesCollection(ConfigurationValues values)
{
_values = values;
}
IEnumerator IEnumerable.GetEnumerator()
{
if (_values._containsInvalidValue)
{
for (int index = 0; index < _values.Count; index++)
{
object value = _values[index];
if (value is InvalidPropValue) yield return value;
}
}
}
}
}
}
| -1 |
dotnet/runtime | 66,245 | JIT: Optimize movzx after setcc | Clear target reg via `xor reg, reg` instead of relying on zero-extend after SETCC which is heavier and slower
```csharp
bool Test(int x) => x == 42;
```
```diff
; Method Test(int):bool:this
G_M55728_IG01: ;; offset=0000H
G_M55728_IG02: ;; offset=0000H
+ 33C0 xor eax, eax
83FA2A cmp edx, 42
0F94C0 sete al
- 0FB6C0 movzx rax, al
G_M55728_IG03: ;; offset=0009H
C3 ret
-; Total bytes of code: 10
+; Total bytes of code: 9
```
Nice diffs:
```
benchmarks.run.Linux.x64.checked.mch:
Total bytes of delta: -5230 (-0.03 % of base)
coreclr_tests.pmi.Linux.x64.checked.mch:
Total bytes of delta: -210860 (-0.16 % of base)
libraries.crossgen2.Linux.x64.checked.mch:
Total bytes of delta: -4420 (-0.06 % of base)
libraries.pmi.Linux.x64.checked.mch:
Total bytes of delta: -25312 (-0.05 % of base)
libraries_tests.pmi.Linux.x64.checked.mch:
Total bytes of delta: -49314 (-0.04 % of base)
``` | EgorBo | 2022-03-05T17:18:25Z | 2022-03-07T23:22:14Z | 440dfe4a7beecd7755767aa247f47af00b119383 | 5635905f134a3329a15112bd4975acef3f661eb2 | JIT: Optimize movzx after setcc. Clear target reg via `xor reg, reg` instead of relying on zero-extend after SETCC which is heavier and slower
```csharp
bool Test(int x) => x == 42;
```
```diff
; Method Test(int):bool:this
G_M55728_IG01: ;; offset=0000H
G_M55728_IG02: ;; offset=0000H
+ 33C0 xor eax, eax
83FA2A cmp edx, 42
0F94C0 sete al
- 0FB6C0 movzx rax, al
G_M55728_IG03: ;; offset=0009H
C3 ret
-; Total bytes of code: 10
+; Total bytes of code: 9
```
Nice diffs:
```
benchmarks.run.Linux.x64.checked.mch:
Total bytes of delta: -5230 (-0.03 % of base)
coreclr_tests.pmi.Linux.x64.checked.mch:
Total bytes of delta: -210860 (-0.16 % of base)
libraries.crossgen2.Linux.x64.checked.mch:
Total bytes of delta: -4420 (-0.06 % of base)
libraries.pmi.Linux.x64.checked.mch:
Total bytes of delta: -25312 (-0.05 % of base)
libraries_tests.pmi.Linux.x64.checked.mch:
Total bytes of delta: -49314 (-0.04 % of base)
``` | ./src/libraries/System.Runtime/tests/System/IO/EndOfStreamExceptionTests.cs | // Licensed to the .NET Foundation under one or more agreements.
// The .NET Foundation licenses this file to you under the MIT license.
using System;
using System.IO;
using Xunit;
namespace System.Tests
{
public static class EndOfStreamExceptionTests
{
private const string exceptionMessage = "Created an exception";
private const string innerExceptionMessage = "Created an InnerException";
private const uint COR_E_ENDOFSTREAM = 0x80070026;
[Fact]
public static void EndOfStreamException_Ctor_Empty()
{
EndOfStreamException i = new EndOfStreamException();
Assert.Equal(COR_E_ENDOFSTREAM, unchecked((uint)i.HResult));
}
[Fact]
public static void EndOfStreamException_Ctor_String()
{
EndOfStreamException i = new EndOfStreamException(exceptionMessage);
Assert.Equal(exceptionMessage, i.Message);
Assert.Equal(COR_E_ENDOFSTREAM, unchecked((uint)i.HResult));
}
[Fact]
public static void EndOfStreamException_Ctor_String_Exception()
{
Exception ex = new Exception(innerExceptionMessage);
EndOfStreamException i = new EndOfStreamException(exceptionMessage, ex);
Assert.Equal(exceptionMessage, i.Message);
Assert.Equal(innerExceptionMessage, i.InnerException.Message);
Assert.Equal(ex.HResult, i.InnerException.HResult);
Assert.Equal(COR_E_ENDOFSTREAM, unchecked((uint)i.HResult));
}
}
}
| // Licensed to the .NET Foundation under one or more agreements.
// The .NET Foundation licenses this file to you under the MIT license.
using System;
using System.IO;
using Xunit;
namespace System.Tests
{
public static class EndOfStreamExceptionTests
{
private const string exceptionMessage = "Created an exception";
private const string innerExceptionMessage = "Created an InnerException";
private const uint COR_E_ENDOFSTREAM = 0x80070026;
[Fact]
public static void EndOfStreamException_Ctor_Empty()
{
EndOfStreamException i = new EndOfStreamException();
Assert.Equal(COR_E_ENDOFSTREAM, unchecked((uint)i.HResult));
}
[Fact]
public static void EndOfStreamException_Ctor_String()
{
EndOfStreamException i = new EndOfStreamException(exceptionMessage);
Assert.Equal(exceptionMessage, i.Message);
Assert.Equal(COR_E_ENDOFSTREAM, unchecked((uint)i.HResult));
}
[Fact]
public static void EndOfStreamException_Ctor_String_Exception()
{
Exception ex = new Exception(innerExceptionMessage);
EndOfStreamException i = new EndOfStreamException(exceptionMessage, ex);
Assert.Equal(exceptionMessage, i.Message);
Assert.Equal(innerExceptionMessage, i.InnerException.Message);
Assert.Equal(ex.HResult, i.InnerException.HResult);
Assert.Equal(COR_E_ENDOFSTREAM, unchecked((uint)i.HResult));
}
}
}
| -1 |
dotnet/runtime | 66,245 | JIT: Optimize movzx after setcc | Clear target reg via `xor reg, reg` instead of relying on zero-extend after SETCC which is heavier and slower
```csharp
bool Test(int x) => x == 42;
```
```diff
; Method Test(int):bool:this
G_M55728_IG01: ;; offset=0000H
G_M55728_IG02: ;; offset=0000H
+ 33C0 xor eax, eax
83FA2A cmp edx, 42
0F94C0 sete al
- 0FB6C0 movzx rax, al
G_M55728_IG03: ;; offset=0009H
C3 ret
-; Total bytes of code: 10
+; Total bytes of code: 9
```
Nice diffs:
```
benchmarks.run.Linux.x64.checked.mch:
Total bytes of delta: -5230 (-0.03 % of base)
coreclr_tests.pmi.Linux.x64.checked.mch:
Total bytes of delta: -210860 (-0.16 % of base)
libraries.crossgen2.Linux.x64.checked.mch:
Total bytes of delta: -4420 (-0.06 % of base)
libraries.pmi.Linux.x64.checked.mch:
Total bytes of delta: -25312 (-0.05 % of base)
libraries_tests.pmi.Linux.x64.checked.mch:
Total bytes of delta: -49314 (-0.04 % of base)
``` | EgorBo | 2022-03-05T17:18:25Z | 2022-03-07T23:22:14Z | 440dfe4a7beecd7755767aa247f47af00b119383 | 5635905f134a3329a15112bd4975acef3f661eb2 | JIT: Optimize movzx after setcc. Clear target reg via `xor reg, reg` instead of relying on zero-extend after SETCC which is heavier and slower
```csharp
bool Test(int x) => x == 42;
```
```diff
; Method Test(int):bool:this
G_M55728_IG01: ;; offset=0000H
G_M55728_IG02: ;; offset=0000H
+ 33C0 xor eax, eax
83FA2A cmp edx, 42
0F94C0 sete al
- 0FB6C0 movzx rax, al
G_M55728_IG03: ;; offset=0009H
C3 ret
-; Total bytes of code: 10
+; Total bytes of code: 9
```
Nice diffs:
```
benchmarks.run.Linux.x64.checked.mch:
Total bytes of delta: -5230 (-0.03 % of base)
coreclr_tests.pmi.Linux.x64.checked.mch:
Total bytes of delta: -210860 (-0.16 % of base)
libraries.crossgen2.Linux.x64.checked.mch:
Total bytes of delta: -4420 (-0.06 % of base)
libraries.pmi.Linux.x64.checked.mch:
Total bytes of delta: -25312 (-0.05 % of base)
libraries_tests.pmi.Linux.x64.checked.mch:
Total bytes of delta: -49314 (-0.04 % of base)
``` | ./src/tests/Loader/classloader/regressions/dev10_568786/4_Misc/ConstrainedMethods.csproj | <Project Sdk="Microsoft.NET.Sdk">
<PropertyGroup>
<AllowUnsafeBlocks>true</AllowUnsafeBlocks>
<OutputType>Exe</OutputType>
<CLRTestPriority>1</CLRTestPriority>
</PropertyGroup>
<ItemGroup>
<Compile Include="ConstrainedMethods.cs" />
</ItemGroup>
</Project>
| <Project Sdk="Microsoft.NET.Sdk">
<PropertyGroup>
<AllowUnsafeBlocks>true</AllowUnsafeBlocks>
<OutputType>Exe</OutputType>
<CLRTestPriority>1</CLRTestPriority>
</PropertyGroup>
<ItemGroup>
<Compile Include="ConstrainedMethods.cs" />
</ItemGroup>
</Project>
| -1 |
dotnet/runtime | 66,245 | JIT: Optimize movzx after setcc | Clear target reg via `xor reg, reg` instead of relying on zero-extend after SETCC which is heavier and slower
```csharp
bool Test(int x) => x == 42;
```
```diff
; Method Test(int):bool:this
G_M55728_IG01: ;; offset=0000H
G_M55728_IG02: ;; offset=0000H
+ 33C0 xor eax, eax
83FA2A cmp edx, 42
0F94C0 sete al
- 0FB6C0 movzx rax, al
G_M55728_IG03: ;; offset=0009H
C3 ret
-; Total bytes of code: 10
+; Total bytes of code: 9
```
Nice diffs:
```
benchmarks.run.Linux.x64.checked.mch:
Total bytes of delta: -5230 (-0.03 % of base)
coreclr_tests.pmi.Linux.x64.checked.mch:
Total bytes of delta: -210860 (-0.16 % of base)
libraries.crossgen2.Linux.x64.checked.mch:
Total bytes of delta: -4420 (-0.06 % of base)
libraries.pmi.Linux.x64.checked.mch:
Total bytes of delta: -25312 (-0.05 % of base)
libraries_tests.pmi.Linux.x64.checked.mch:
Total bytes of delta: -49314 (-0.04 % of base)
``` | EgorBo | 2022-03-05T17:18:25Z | 2022-03-07T23:22:14Z | 440dfe4a7beecd7755767aa247f47af00b119383 | 5635905f134a3329a15112bd4975acef3f661eb2 | JIT: Optimize movzx after setcc. Clear target reg via `xor reg, reg` instead of relying on a zero-extend after SETCC, which is heavier and slower
```csharp
bool Test(int x) => x == 42;
```
```diff
; Method Test(int):bool:this
G_M55728_IG01: ;; offset=0000H
G_M55728_IG02: ;; offset=0000H
+ 33C0 xor eax, eax
83FA2A cmp edx, 42
0F94C0 sete al
- 0FB6C0 movzx rax, al
G_M55728_IG03: ;; offset=0009H
C3 ret
-; Total bytes of code: 10
+; Total bytes of code: 9
```
Nice diffs:
```
benchmarks.run.Linux.x64.checked.mch:
Total bytes of delta: -5230 (-0.03 % of base)
coreclr_tests.pmi.Linux.x64.checked.mch:
Total bytes of delta: -210860 (-0.16 % of base)
libraries.crossgen2.Linux.x64.checked.mch:
Total bytes of delta: -4420 (-0.06 % of base)
libraries.pmi.Linux.x64.checked.mch:
Total bytes of delta: -25312 (-0.05 % of base)
libraries_tests.pmi.Linux.x64.checked.mch:
Total bytes of delta: -49314 (-0.04 % of base)
``` | ./src/libraries/Common/src/Interop/Unix/System.Native/Interop.GetUnixName.cs | // Licensed to the .NET Foundation under one or more agreements.
// The .NET Foundation licenses this file to you under the MIT license.
using System;
using System.Runtime.InteropServices;
internal static partial class Interop
{
internal static partial class Sys
{
[GeneratedDllImport(Libraries.SystemNative, EntryPoint = "SystemNative_GetUnixName")]
private static partial IntPtr GetUnixNamePrivate();
internal static string GetUnixName()
{
IntPtr ptr = GetUnixNamePrivate();
return Marshal.PtrToStringAnsi(ptr)!;
}
}
}
| // Licensed to the .NET Foundation under one or more agreements.
// The .NET Foundation licenses this file to you under the MIT license.
using System;
using System.Runtime.InteropServices;
internal static partial class Interop
{
internal static partial class Sys
{
[GeneratedDllImport(Libraries.SystemNative, EntryPoint = "SystemNative_GetUnixName")]
private static partial IntPtr GetUnixNamePrivate();
internal static string GetUnixName()
{
IntPtr ptr = GetUnixNamePrivate();
return Marshal.PtrToStringAnsi(ptr)!;
}
}
}
| -1 |
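The GetUnixName wrapper above follows a common marshaling pattern: the native side hands back a pointer to a NUL-terminated buffer it owns, and the managed side copies it into a string with Marshal.PtrToStringAnsi. Below is a minimal self-contained sketch of that copy step; it simulates the unmanaged buffer with StringToHGlobalAnsi instead of calling native code, purely for illustration.

```csharp
using System;
using System.Runtime.InteropServices;

internal static class PtrToStringAnsiDemo
{
    private static void Main()
    {
        // Stand-in for a buffer returned by native code.
        IntPtr nativeBuffer = Marshal.StringToHGlobalAnsi("Linux");
        try
        {
            // Copies the NUL-terminated bytes at the pointer into a managed string.
            string name = Marshal.PtrToStringAnsi(nativeBuffer)!;
            Console.WriteLine(name); // Linux
        }
        finally
        {
            // Unlike the GetUnixName case, this buffer is owned here and must be freed.
            Marshal.FreeHGlobal(nativeBuffer);
        }
    }
}
```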
dotnet/runtime | 66,245 | JIT: Optimize movzx after setcc | Clear target reg via `xor reg, reg` instead of relying on a zero-extend after SETCC, which is heavier and slower
```csharp
bool Test(int x) => x == 42;
```
```diff
; Method Test(int):bool:this
G_M55728_IG01: ;; offset=0000H
G_M55728_IG02: ;; offset=0000H
+ 33C0 xor eax, eax
83FA2A cmp edx, 42
0F94C0 sete al
- 0FB6C0 movzx rax, al
G_M55728_IG03: ;; offset=0009H
C3 ret
-; Total bytes of code: 10
+; Total bytes of code: 9
```
Nice diffs:
```
benchmarks.run.Linux.x64.checked.mch:
Total bytes of delta: -5230 (-0.03 % of base)
coreclr_tests.pmi.Linux.x64.checked.mch:
Total bytes of delta: -210860 (-0.16 % of base)
libraries.crossgen2.Linux.x64.checked.mch:
Total bytes of delta: -4420 (-0.06 % of base)
libraries.pmi.Linux.x64.checked.mch:
Total bytes of delta: -25312 (-0.05 % of base)
libraries_tests.pmi.Linux.x64.checked.mch:
Total bytes of delta: -49314 (-0.04 % of base)
``` | EgorBo | 2022-03-05T17:18:25Z | 2022-03-07T23:22:14Z | 440dfe4a7beecd7755767aa247f47af00b119383 | 5635905f134a3329a15112bd4975acef3f661eb2 | JIT: Optimize movzx after setcc. Clear target reg via `xor reg, reg` instead of relying on a zero-extend after SETCC, which is heavier and slower
```csharp
bool Test(int x) => x == 42;
```
```diff
; Method Test(int):bool:this
G_M55728_IG01: ;; offset=0000H
G_M55728_IG02: ;; offset=0000H
+ 33C0 xor eax, eax
83FA2A cmp edx, 42
0F94C0 sete al
- 0FB6C0 movzx rax, al
G_M55728_IG03: ;; offset=0009H
C3 ret
-; Total bytes of code: 10
+; Total bytes of code: 9
```
Nice diffs:
```
benchmarks.run.Linux.x64.checked.mch:
Total bytes of delta: -5230 (-0.03 % of base)
coreclr_tests.pmi.Linux.x64.checked.mch:
Total bytes of delta: -210860 (-0.16 % of base)
libraries.crossgen2.Linux.x64.checked.mch:
Total bytes of delta: -4420 (-0.06 % of base)
libraries.pmi.Linux.x64.checked.mch:
Total bytes of delta: -25312 (-0.05 % of base)
libraries_tests.pmi.Linux.x64.checked.mch:
Total bytes of delta: -49314 (-0.04 % of base)
``` | ./src/tests/JIT/HardwareIntrinsics/General/Vector64/LessThanOrEqualAll.Int16.cs | // Licensed to the .NET Foundation under one or more agreements.
// The .NET Foundation licenses this file to you under the MIT license.
/******************************************************************************
* This file is auto-generated from a template file by the GenerateTests.csx *
* script in tests\src\JIT\HardwareIntrinsics\X86\Shared. In order to make *
* changes, please update the corresponding template and run according to the *
* directions listed in the file. *
******************************************************************************/
using System;
using System.Runtime.CompilerServices;
using System.Runtime.InteropServices;
using System.Runtime.Intrinsics;
namespace JIT.HardwareIntrinsics.General
{
public static partial class Program
{
private static void LessThanOrEqualAllInt16()
{
var test = new VectorBooleanBinaryOpTest__LessThanOrEqualAllInt16();
// Validates basic functionality works, using Unsafe.Read
test.RunBasicScenario_UnsafeRead();
// Validates calling via reflection works, using Unsafe.Read
test.RunReflectionScenario_UnsafeRead();
// Validates passing a static member works
test.RunClsVarScenario();
// Validates passing a local works, using Unsafe.Read
test.RunLclVarScenario_UnsafeRead();
// Validates passing the field of a local class works
test.RunClassLclFldScenario();
// Validates passing an instance member of a class works
test.RunClassFldScenario();
// Validates passing the field of a local struct works
test.RunStructLclFldScenario();
// Validates passing an instance member of a struct works
test.RunStructFldScenario();
if (!test.Succeeded)
{
throw new Exception("One or more scenarios did not complete as expected.");
}
}
}
public sealed unsafe class VectorBooleanBinaryOpTest__LessThanOrEqualAllInt16
{
private struct DataTable
{
private byte[] inArray1;
private byte[] inArray2;
private GCHandle inHandle1;
private GCHandle inHandle2;
private ulong alignment;
public DataTable(Int16[] inArray1, Int16[] inArray2, int alignment)
{
int sizeOfinArray1 = inArray1.Length * Unsafe.SizeOf<Int16>();
int sizeOfinArray2 = inArray2.Length * Unsafe.SizeOf<Int16>();
if ((alignment != 32 && alignment != 16 && alignment != 8) || (alignment * 2) < sizeOfinArray1 || (alignment * 2) < sizeOfinArray2)
{
throw new ArgumentException("Invalid value of alignment");
}
this.inArray1 = new byte[alignment * 2];
this.inArray2 = new byte[alignment * 2];
this.inHandle1 = GCHandle.Alloc(this.inArray1, GCHandleType.Pinned);
this.inHandle2 = GCHandle.Alloc(this.inArray2, GCHandleType.Pinned);
this.alignment = (ulong)alignment;
Unsafe.CopyBlockUnaligned(ref Unsafe.AsRef<byte>(inArray1Ptr), ref Unsafe.As<Int16, byte>(ref inArray1[0]), (uint)sizeOfinArray1);
Unsafe.CopyBlockUnaligned(ref Unsafe.AsRef<byte>(inArray2Ptr), ref Unsafe.As<Int16, byte>(ref inArray2[0]), (uint)sizeOfinArray2);
}
public void* inArray1Ptr => Align((byte*)(inHandle1.AddrOfPinnedObject().ToPointer()), alignment);
public void* inArray2Ptr => Align((byte*)(inHandle2.AddrOfPinnedObject().ToPointer()), alignment);
public void Dispose()
{
inHandle1.Free();
inHandle2.Free();
}
private static unsafe void* Align(byte* buffer, ulong expectedAlignment)
{
return (void*)(((ulong)buffer + expectedAlignment - 1) & ~(expectedAlignment - 1));
}
}
private struct TestStruct
{
public Vector64<Int16> _fld1;
public Vector64<Int16> _fld2;
public static TestStruct Create()
{
var testStruct = new TestStruct();
for (var i = 0; i < Op1ElementCount; i++) { _data1[i] = TestLibrary.Generator.GetInt16(); }
Unsafe.CopyBlockUnaligned(ref Unsafe.As<Vector64<Int16>, byte>(ref testStruct._fld1), ref Unsafe.As<Int16, byte>(ref _data1[0]), (uint)Unsafe.SizeOf<Vector64<Int16>>());
for (var i = 0; i < Op2ElementCount; i++) { _data2[i] = TestLibrary.Generator.GetInt16(); }
Unsafe.CopyBlockUnaligned(ref Unsafe.As<Vector64<Int16>, byte>(ref testStruct._fld2), ref Unsafe.As<Int16, byte>(ref _data2[0]), (uint)Unsafe.SizeOf<Vector64<Int16>>());
return testStruct;
}
public void RunStructFldScenario(VectorBooleanBinaryOpTest__LessThanOrEqualAllInt16 testClass)
{
var result = Vector64.LessThanOrEqualAll(_fld1, _fld2);
testClass.ValidateResult(_fld1, _fld2, result);
}
}
private static readonly int LargestVectorSize = 8;
private static readonly int Op1ElementCount = Unsafe.SizeOf<Vector64<Int16>>() / sizeof(Int16);
private static readonly int Op2ElementCount = Unsafe.SizeOf<Vector64<Int16>>() / sizeof(Int16);
private static Int16[] _data1 = new Int16[Op1ElementCount];
private static Int16[] _data2 = new Int16[Op2ElementCount];
private static Vector64<Int16> _clsVar1;
private static Vector64<Int16> _clsVar2;
private Vector64<Int16> _fld1;
private Vector64<Int16> _fld2;
private DataTable _dataTable;
static VectorBooleanBinaryOpTest__LessThanOrEqualAllInt16()
{
for (var i = 0; i < Op1ElementCount; i++) { _data1[i] = TestLibrary.Generator.GetInt16(); }
Unsafe.CopyBlockUnaligned(ref Unsafe.As<Vector64<Int16>, byte>(ref _clsVar1), ref Unsafe.As<Int16, byte>(ref _data1[0]), (uint)Unsafe.SizeOf<Vector64<Int16>>());
for (var i = 0; i < Op2ElementCount; i++) { _data2[i] = TestLibrary.Generator.GetInt16(); }
Unsafe.CopyBlockUnaligned(ref Unsafe.As<Vector64<Int16>, byte>(ref _clsVar2), ref Unsafe.As<Int16, byte>(ref _data2[0]), (uint)Unsafe.SizeOf<Vector64<Int16>>());
}
public VectorBooleanBinaryOpTest__LessThanOrEqualAllInt16()
{
Succeeded = true;
for (var i = 0; i < Op1ElementCount; i++) { _data1[i] = TestLibrary.Generator.GetInt16(); }
Unsafe.CopyBlockUnaligned(ref Unsafe.As<Vector64<Int16>, byte>(ref _fld1), ref Unsafe.As<Int16, byte>(ref _data1[0]), (uint)Unsafe.SizeOf<Vector64<Int16>>());
for (var i = 0; i < Op2ElementCount; i++) { _data2[i] = TestLibrary.Generator.GetInt16(); }
Unsafe.CopyBlockUnaligned(ref Unsafe.As<Vector64<Int16>, byte>(ref _fld2), ref Unsafe.As<Int16, byte>(ref _data2[0]), (uint)Unsafe.SizeOf<Vector64<Int16>>());
for (var i = 0; i < Op1ElementCount; i++) { _data1[i] = TestLibrary.Generator.GetInt16(); }
for (var i = 0; i < Op2ElementCount; i++) { _data2[i] = TestLibrary.Generator.GetInt16(); }
_dataTable = new DataTable(_data1, _data2, LargestVectorSize);
}
public bool Succeeded { get; set; }
public void RunBasicScenario_UnsafeRead()
{
TestLibrary.TestFramework.BeginScenario(nameof(RunBasicScenario_UnsafeRead));
var result = Vector64.LessThanOrEqualAll(
Unsafe.Read<Vector64<Int16>>(_dataTable.inArray1Ptr),
Unsafe.Read<Vector64<Int16>>(_dataTable.inArray2Ptr)
);
ValidateResult(_dataTable.inArray1Ptr, _dataTable.inArray2Ptr, result);
}
public void RunReflectionScenario_UnsafeRead()
{
TestLibrary.TestFramework.BeginScenario(nameof(RunReflectionScenario_UnsafeRead));
var method = typeof(Vector64).GetMethod(nameof(Vector64.LessThanOrEqualAll), new Type[] {
typeof(Vector64<Int16>),
typeof(Vector64<Int16>)
});
if (method is null)
{
method = typeof(Vector64).GetMethod(nameof(Vector64.LessThanOrEqualAll), 1, new Type[] {
typeof(Vector64<>).MakeGenericType(Type.MakeGenericMethodParameter(0)),
typeof(Vector64<>).MakeGenericType(Type.MakeGenericMethodParameter(0))
});
}
if (method.IsGenericMethodDefinition)
{
method = method.MakeGenericMethod(typeof(Int16));
}
var result = method.Invoke(null, new object[] {
Unsafe.Read<Vector64<Int16>>(_dataTable.inArray1Ptr),
Unsafe.Read<Vector64<Int16>>(_dataTable.inArray2Ptr)
});
ValidateResult(_dataTable.inArray1Ptr, _dataTable.inArray2Ptr, (bool)(result));
}
public void RunClsVarScenario()
{
TestLibrary.TestFramework.BeginScenario(nameof(RunClsVarScenario));
var result = Vector64.LessThanOrEqualAll(
_clsVar1,
_clsVar2
);
ValidateResult(_clsVar1, _clsVar2, result);
}
public void RunLclVarScenario_UnsafeRead()
{
TestLibrary.TestFramework.BeginScenario(nameof(RunLclVarScenario_UnsafeRead));
var op1 = Unsafe.Read<Vector64<Int16>>(_dataTable.inArray1Ptr);
var op2 = Unsafe.Read<Vector64<Int16>>(_dataTable.inArray2Ptr);
var result = Vector64.LessThanOrEqualAll(op1, op2);
ValidateResult(op1, op2, result);
}
public void RunClassLclFldScenario()
{
TestLibrary.TestFramework.BeginScenario(nameof(RunClassLclFldScenario));
var test = new VectorBooleanBinaryOpTest__LessThanOrEqualAllInt16();
var result = Vector64.LessThanOrEqualAll(test._fld1, test._fld2);
ValidateResult(test._fld1, test._fld2, result);
}
public void RunClassFldScenario()
{
TestLibrary.TestFramework.BeginScenario(nameof(RunClassFldScenario));
var result = Vector64.LessThanOrEqualAll(_fld1, _fld2);
ValidateResult(_fld1, _fld2, result);
}
public void RunStructLclFldScenario()
{
TestLibrary.TestFramework.BeginScenario(nameof(RunStructLclFldScenario));
var test = TestStruct.Create();
var result = Vector64.LessThanOrEqualAll(test._fld1, test._fld2);
ValidateResult(test._fld1, test._fld2, result);
}
public void RunStructFldScenario()
{
TestLibrary.TestFramework.BeginScenario(nameof(RunStructFldScenario));
var test = TestStruct.Create();
test.RunStructFldScenario(this);
}
private void ValidateResult(Vector64<Int16> op1, Vector64<Int16> op2, bool result, [CallerMemberName] string method = "")
{
Int16[] inArray1 = new Int16[Op1ElementCount];
Int16[] inArray2 = new Int16[Op2ElementCount];
Unsafe.WriteUnaligned(ref Unsafe.As<Int16, byte>(ref inArray1[0]), op1);
Unsafe.WriteUnaligned(ref Unsafe.As<Int16, byte>(ref inArray2[0]), op2);
ValidateResult(inArray1, inArray2, result, method);
}
private void ValidateResult(void* op1, void* op2, bool result, [CallerMemberName] string method = "")
{
Int16[] inArray1 = new Int16[Op1ElementCount];
Int16[] inArray2 = new Int16[Op2ElementCount];
Unsafe.CopyBlockUnaligned(ref Unsafe.As<Int16, byte>(ref inArray1[0]), ref Unsafe.AsRef<byte>(op1), (uint)Unsafe.SizeOf<Vector64<Int16>>());
Unsafe.CopyBlockUnaligned(ref Unsafe.As<Int16, byte>(ref inArray2[0]), ref Unsafe.AsRef<byte>(op2), (uint)Unsafe.SizeOf<Vector64<Int16>>());
ValidateResult(inArray1, inArray2, result, method);
}
private void ValidateResult(Int16[] left, Int16[] right, bool result, [CallerMemberName] string method = "")
{
bool succeeded = true;
var expectedResult = true;
for (var i = 0; i < Op1ElementCount; i++)
{
expectedResult &= (left[i] <= right[i]);
}
succeeded = (expectedResult == result);
if (!succeeded)
{
TestLibrary.TestFramework.LogInformation($"{nameof(Vector64)}.{nameof(Vector64.LessThanOrEqualAll)}<Int16>(Vector64<Int16>, Vector64<Int16>): {method} failed:");
TestLibrary.TestFramework.LogInformation($" left: ({string.Join(", ", left)})");
TestLibrary.TestFramework.LogInformation($" right: ({string.Join(", ", right)})");
TestLibrary.TestFramework.LogInformation($" result: ({result})");
TestLibrary.TestFramework.LogInformation(string.Empty);
Succeeded = false;
}
}
}
}
| // Licensed to the .NET Foundation under one or more agreements.
// The .NET Foundation licenses this file to you under the MIT license.
/******************************************************************************
* This file is auto-generated from a template file by the GenerateTests.csx *
* script in tests\src\JIT\HardwareIntrinsics\X86\Shared. In order to make *
* changes, please update the corresponding template and run according to the *
* directions listed in the file. *
******************************************************************************/
using System;
using System.Runtime.CompilerServices;
using System.Runtime.InteropServices;
using System.Runtime.Intrinsics;
namespace JIT.HardwareIntrinsics.General
{
public static partial class Program
{
private static void LessThanOrEqualAllInt16()
{
var test = new VectorBooleanBinaryOpTest__LessThanOrEqualAllInt16();
// Validates basic functionality works, using Unsafe.Read
test.RunBasicScenario_UnsafeRead();
// Validates calling via reflection works, using Unsafe.Read
test.RunReflectionScenario_UnsafeRead();
// Validates passing a static member works
test.RunClsVarScenario();
// Validates passing a local works, using Unsafe.Read
test.RunLclVarScenario_UnsafeRead();
// Validates passing the field of a local class works
test.RunClassLclFldScenario();
// Validates passing an instance member of a class works
test.RunClassFldScenario();
// Validates passing the field of a local struct works
test.RunStructLclFldScenario();
// Validates passing an instance member of a struct works
test.RunStructFldScenario();
if (!test.Succeeded)
{
throw new Exception("One or more scenarios did not complete as expected.");
}
}
}
public sealed unsafe class VectorBooleanBinaryOpTest__LessThanOrEqualAllInt16
{
private struct DataTable
{
private byte[] inArray1;
private byte[] inArray2;
private GCHandle inHandle1;
private GCHandle inHandle2;
private ulong alignment;
public DataTable(Int16[] inArray1, Int16[] inArray2, int alignment)
{
int sizeOfinArray1 = inArray1.Length * Unsafe.SizeOf<Int16>();
int sizeOfinArray2 = inArray2.Length * Unsafe.SizeOf<Int16>();
if ((alignment != 32 && alignment != 16 && alignment != 8) || (alignment * 2) < sizeOfinArray1 || (alignment * 2) < sizeOfinArray2)
{
throw new ArgumentException("Invalid value of alignment");
}
this.inArray1 = new byte[alignment * 2];
this.inArray2 = new byte[alignment * 2];
this.inHandle1 = GCHandle.Alloc(this.inArray1, GCHandleType.Pinned);
this.inHandle2 = GCHandle.Alloc(this.inArray2, GCHandleType.Pinned);
this.alignment = (ulong)alignment;
Unsafe.CopyBlockUnaligned(ref Unsafe.AsRef<byte>(inArray1Ptr), ref Unsafe.As<Int16, byte>(ref inArray1[0]), (uint)sizeOfinArray1);
Unsafe.CopyBlockUnaligned(ref Unsafe.AsRef<byte>(inArray2Ptr), ref Unsafe.As<Int16, byte>(ref inArray2[0]), (uint)sizeOfinArray2);
}
public void* inArray1Ptr => Align((byte*)(inHandle1.AddrOfPinnedObject().ToPointer()), alignment);
public void* inArray2Ptr => Align((byte*)(inHandle2.AddrOfPinnedObject().ToPointer()), alignment);
public void Dispose()
{
inHandle1.Free();
inHandle2.Free();
}
private static unsafe void* Align(byte* buffer, ulong expectedAlignment)
{
return (void*)(((ulong)buffer + expectedAlignment - 1) & ~(expectedAlignment - 1));
}
}
private struct TestStruct
{
public Vector64<Int16> _fld1;
public Vector64<Int16> _fld2;
public static TestStruct Create()
{
var testStruct = new TestStruct();
for (var i = 0; i < Op1ElementCount; i++) { _data1[i] = TestLibrary.Generator.GetInt16(); }
Unsafe.CopyBlockUnaligned(ref Unsafe.As<Vector64<Int16>, byte>(ref testStruct._fld1), ref Unsafe.As<Int16, byte>(ref _data1[0]), (uint)Unsafe.SizeOf<Vector64<Int16>>());
for (var i = 0; i < Op2ElementCount; i++) { _data2[i] = TestLibrary.Generator.GetInt16(); }
Unsafe.CopyBlockUnaligned(ref Unsafe.As<Vector64<Int16>, byte>(ref testStruct._fld2), ref Unsafe.As<Int16, byte>(ref _data2[0]), (uint)Unsafe.SizeOf<Vector64<Int16>>());
return testStruct;
}
public void RunStructFldScenario(VectorBooleanBinaryOpTest__LessThanOrEqualAllInt16 testClass)
{
var result = Vector64.LessThanOrEqualAll(_fld1, _fld2);
testClass.ValidateResult(_fld1, _fld2, result);
}
}
private static readonly int LargestVectorSize = 8;
private static readonly int Op1ElementCount = Unsafe.SizeOf<Vector64<Int16>>() / sizeof(Int16);
private static readonly int Op2ElementCount = Unsafe.SizeOf<Vector64<Int16>>() / sizeof(Int16);
private static Int16[] _data1 = new Int16[Op1ElementCount];
private static Int16[] _data2 = new Int16[Op2ElementCount];
private static Vector64<Int16> _clsVar1;
private static Vector64<Int16> _clsVar2;
private Vector64<Int16> _fld1;
private Vector64<Int16> _fld2;
private DataTable _dataTable;
static VectorBooleanBinaryOpTest__LessThanOrEqualAllInt16()
{
for (var i = 0; i < Op1ElementCount; i++) { _data1[i] = TestLibrary.Generator.GetInt16(); }
Unsafe.CopyBlockUnaligned(ref Unsafe.As<Vector64<Int16>, byte>(ref _clsVar1), ref Unsafe.As<Int16, byte>(ref _data1[0]), (uint)Unsafe.SizeOf<Vector64<Int16>>());
for (var i = 0; i < Op2ElementCount; i++) { _data2[i] = TestLibrary.Generator.GetInt16(); }
Unsafe.CopyBlockUnaligned(ref Unsafe.As<Vector64<Int16>, byte>(ref _clsVar2), ref Unsafe.As<Int16, byte>(ref _data2[0]), (uint)Unsafe.SizeOf<Vector64<Int16>>());
}
public VectorBooleanBinaryOpTest__LessThanOrEqualAllInt16()
{
Succeeded = true;
for (var i = 0; i < Op1ElementCount; i++) { _data1[i] = TestLibrary.Generator.GetInt16(); }
Unsafe.CopyBlockUnaligned(ref Unsafe.As<Vector64<Int16>, byte>(ref _fld1), ref Unsafe.As<Int16, byte>(ref _data1[0]), (uint)Unsafe.SizeOf<Vector64<Int16>>());
for (var i = 0; i < Op2ElementCount; i++) { _data2[i] = TestLibrary.Generator.GetInt16(); }
Unsafe.CopyBlockUnaligned(ref Unsafe.As<Vector64<Int16>, byte>(ref _fld2), ref Unsafe.As<Int16, byte>(ref _data2[0]), (uint)Unsafe.SizeOf<Vector64<Int16>>());
for (var i = 0; i < Op1ElementCount; i++) { _data1[i] = TestLibrary.Generator.GetInt16(); }
for (var i = 0; i < Op2ElementCount; i++) { _data2[i] = TestLibrary.Generator.GetInt16(); }
_dataTable = new DataTable(_data1, _data2, LargestVectorSize);
}
public bool Succeeded { get; set; }
public void RunBasicScenario_UnsafeRead()
{
TestLibrary.TestFramework.BeginScenario(nameof(RunBasicScenario_UnsafeRead));
var result = Vector64.LessThanOrEqualAll(
Unsafe.Read<Vector64<Int16>>(_dataTable.inArray1Ptr),
Unsafe.Read<Vector64<Int16>>(_dataTable.inArray2Ptr)
);
ValidateResult(_dataTable.inArray1Ptr, _dataTable.inArray2Ptr, result);
}
public void RunReflectionScenario_UnsafeRead()
{
TestLibrary.TestFramework.BeginScenario(nameof(RunReflectionScenario_UnsafeRead));
var method = typeof(Vector64).GetMethod(nameof(Vector64.LessThanOrEqualAll), new Type[] {
typeof(Vector64<Int16>),
typeof(Vector64<Int16>)
});
if (method is null)
{
method = typeof(Vector64).GetMethod(nameof(Vector64.LessThanOrEqualAll), 1, new Type[] {
typeof(Vector64<>).MakeGenericType(Type.MakeGenericMethodParameter(0)),
typeof(Vector64<>).MakeGenericType(Type.MakeGenericMethodParameter(0))
});
}
if (method.IsGenericMethodDefinition)
{
method = method.MakeGenericMethod(typeof(Int16));
}
var result = method.Invoke(null, new object[] {
Unsafe.Read<Vector64<Int16>>(_dataTable.inArray1Ptr),
Unsafe.Read<Vector64<Int16>>(_dataTable.inArray2Ptr)
});
ValidateResult(_dataTable.inArray1Ptr, _dataTable.inArray2Ptr, (bool)(result));
}
public void RunClsVarScenario()
{
TestLibrary.TestFramework.BeginScenario(nameof(RunClsVarScenario));
var result = Vector64.LessThanOrEqualAll(
_clsVar1,
_clsVar2
);
ValidateResult(_clsVar1, _clsVar2, result);
}
public void RunLclVarScenario_UnsafeRead()
{
TestLibrary.TestFramework.BeginScenario(nameof(RunLclVarScenario_UnsafeRead));
var op1 = Unsafe.Read<Vector64<Int16>>(_dataTable.inArray1Ptr);
var op2 = Unsafe.Read<Vector64<Int16>>(_dataTable.inArray2Ptr);
var result = Vector64.LessThanOrEqualAll(op1, op2);
ValidateResult(op1, op2, result);
}
public void RunClassLclFldScenario()
{
TestLibrary.TestFramework.BeginScenario(nameof(RunClassLclFldScenario));
var test = new VectorBooleanBinaryOpTest__LessThanOrEqualAllInt16();
var result = Vector64.LessThanOrEqualAll(test._fld1, test._fld2);
ValidateResult(test._fld1, test._fld2, result);
}
public void RunClassFldScenario()
{
TestLibrary.TestFramework.BeginScenario(nameof(RunClassFldScenario));
var result = Vector64.LessThanOrEqualAll(_fld1, _fld2);
ValidateResult(_fld1, _fld2, result);
}
public void RunStructLclFldScenario()
{
TestLibrary.TestFramework.BeginScenario(nameof(RunStructLclFldScenario));
var test = TestStruct.Create();
var result = Vector64.LessThanOrEqualAll(test._fld1, test._fld2);
ValidateResult(test._fld1, test._fld2, result);
}
public void RunStructFldScenario()
{
TestLibrary.TestFramework.BeginScenario(nameof(RunStructFldScenario));
var test = TestStruct.Create();
test.RunStructFldScenario(this);
}
private void ValidateResult(Vector64<Int16> op1, Vector64<Int16> op2, bool result, [CallerMemberName] string method = "")
{
Int16[] inArray1 = new Int16[Op1ElementCount];
Int16[] inArray2 = new Int16[Op2ElementCount];
Unsafe.WriteUnaligned(ref Unsafe.As<Int16, byte>(ref inArray1[0]), op1);
Unsafe.WriteUnaligned(ref Unsafe.As<Int16, byte>(ref inArray2[0]), op2);
ValidateResult(inArray1, inArray2, result, method);
}
private void ValidateResult(void* op1, void* op2, bool result, [CallerMemberName] string method = "")
{
Int16[] inArray1 = new Int16[Op1ElementCount];
Int16[] inArray2 = new Int16[Op2ElementCount];
Unsafe.CopyBlockUnaligned(ref Unsafe.As<Int16, byte>(ref inArray1[0]), ref Unsafe.AsRef<byte>(op1), (uint)Unsafe.SizeOf<Vector64<Int16>>());
Unsafe.CopyBlockUnaligned(ref Unsafe.As<Int16, byte>(ref inArray2[0]), ref Unsafe.AsRef<byte>(op2), (uint)Unsafe.SizeOf<Vector64<Int16>>());
ValidateResult(inArray1, inArray2, result, method);
}
private void ValidateResult(Int16[] left, Int16[] right, bool result, [CallerMemberName] string method = "")
{
bool succeeded = true;
var expectedResult = true;
for (var i = 0; i < Op1ElementCount; i++)
{
expectedResult &= (left[i] <= right[i]);
}
succeeded = (expectedResult == result);
if (!succeeded)
{
TestLibrary.TestFramework.LogInformation($"{nameof(Vector64)}.{nameof(Vector64.LessThanOrEqualAll)}<Int16>(Vector64<Int16>, Vector64<Int16>): {method} failed:");
TestLibrary.TestFramework.LogInformation($" left: ({string.Join(", ", left)})");
TestLibrary.TestFramework.LogInformation($" right: ({string.Join(", ", right)})");
TestLibrary.TestFramework.LogInformation($" result: ({result})");
TestLibrary.TestFramework.LogInformation(string.Empty);
Succeeded = false;
}
}
}
}
| -1 |
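The generated test above checks Vector64.LessThanOrEqualAll against a scalar AND-reduction over the four Int16 lanes. A standalone sketch of the same cross-check (it assumes the generic Vector64 APIs that were new in the .NET 7 previews this repo snapshot targets; the input values are arbitrary):

```csharp
using System;
using System.Runtime.Intrinsics;

public static class LessThanOrEqualAllDemo
{
    // Scalar reference mirroring the ValidateResult loop in the generated test.
    public static bool ScalarLessThanOrEqualAll(short[] left, short[] right)
    {
        bool all = true;
        for (int i = 0; i < left.Length; i++)
        {
            all &= left[i] <= right[i];
        }
        return all;
    }

    public static void Main()
    {
        short[] a = { 1, 2, 3, 4 };
        short[] b = { 1, 3, 3, 9 };

        // Vector64<short> holds exactly four 16-bit lanes.
        Vector64<short> va = Vector64.Create(a[0], a[1], a[2], a[3]);
        Vector64<short> vb = Vector64.Create(b[0], b[1], b[2], b[3]);

        bool viaIntrinsic = Vector64.LessThanOrEqualAll(va, vb);
        bool viaScalar = ScalarLessThanOrEqualAll(a, b);

        Console.WriteLine(viaIntrinsic);              // True
        Console.WriteLine(viaIntrinsic == viaScalar); // True
    }
}
```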
dotnet/runtime | 66,245 | JIT: Optimize movzx after setcc | Clear target reg via `xor reg, reg` instead of relying on a zero-extend after SETCC, which is heavier and slower
```csharp
bool Test(int x) => x == 42;
```
```diff
; Method Test(int):bool:this
G_M55728_IG01: ;; offset=0000H
G_M55728_IG02: ;; offset=0000H
+ 33C0 xor eax, eax
83FA2A cmp edx, 42
0F94C0 sete al
- 0FB6C0 movzx rax, al
G_M55728_IG03: ;; offset=0009H
C3 ret
-; Total bytes of code: 10
+; Total bytes of code: 9
```
Nice diffs:
```
benchmarks.run.Linux.x64.checked.mch:
Total bytes of delta: -5230 (-0.03 % of base)
coreclr_tests.pmi.Linux.x64.checked.mch:
Total bytes of delta: -210860 (-0.16 % of base)
libraries.crossgen2.Linux.x64.checked.mch:
Total bytes of delta: -4420 (-0.06 % of base)
libraries.pmi.Linux.x64.checked.mch:
Total bytes of delta: -25312 (-0.05 % of base)
libraries_tests.pmi.Linux.x64.checked.mch:
Total bytes of delta: -49314 (-0.04 % of base)
``` | EgorBo | 2022-03-05T17:18:25Z | 2022-03-07T23:22:14Z | 440dfe4a7beecd7755767aa247f47af00b119383 | 5635905f134a3329a15112bd4975acef3f661eb2 | JIT: Optimize movzx after setcc. Clear target reg via `xor reg, reg` instead of relying on a zero-extend after SETCC, which is heavier and slower
```csharp
bool Test(int x) => x == 42;
```
```diff
; Method Test(int):bool:this
G_M55728_IG01: ;; offset=0000H
G_M55728_IG02: ;; offset=0000H
+ 33C0 xor eax, eax
83FA2A cmp edx, 42
0F94C0 sete al
- 0FB6C0 movzx rax, al
G_M55728_IG03: ;; offset=0009H
C3 ret
-; Total bytes of code: 10
+; Total bytes of code: 9
```
Nice diffs:
```
benchmarks.run.Linux.x64.checked.mch:
Total bytes of delta: -5230 (-0.03 % of base)
coreclr_tests.pmi.Linux.x64.checked.mch:
Total bytes of delta: -210860 (-0.16 % of base)
libraries.crossgen2.Linux.x64.checked.mch:
Total bytes of delta: -4420 (-0.06 % of base)
libraries.pmi.Linux.x64.checked.mch:
Total bytes of delta: -25312 (-0.05 % of base)
libraries_tests.pmi.Linux.x64.checked.mch:
Total bytes of delta: -49314 (-0.04 % of base)
``` | ./src/tests/Loader/classloader/TypeGeneratorTests/TypeGeneratorTest213/Generated213.il | // Licensed to the .NET Foundation under one or more agreements.
// The .NET Foundation licenses this file to you under the MIT license.
.assembly extern mscorlib { .publickeytoken = (B7 7A 5C 56 19 34 E0 89 ) .ver 4:0:0:0 }
.assembly extern TestFramework { .publickeytoken = ( B0 3F 5F 7F 11 D5 0A 3A ) }
//TYPES IN FORWARDER ASSEMBLIES:
//TEST ASSEMBLY:
.assembly Generated213 { .hash algorithm 0x00008004 }
.assembly extern xunit.core {}
.class public BaseClass0
{
.method public hidebysig specialname rtspecialname instance void .ctor() cil managed {
ldarg.0
call instance void [mscorlib]System.Object::.ctor()
ret
}
}
.class public BaseClass1
extends BaseClass0
{
.method public hidebysig specialname rtspecialname instance void .ctor() cil managed {
ldarg.0
call instance void BaseClass0::.ctor()
ret
}
}
.class public sequential sealed MyStruct263`2<T0, T1>
extends [mscorlib]System.ValueType
implements class IBase1`1<class BaseClass1>, IBase0
{
.pack 0
.size 1
.method public hidebysig newslot virtual instance string Method4() cil managed noinlining {
ldstr "MyStruct263::Method4.2018()"
ret
}
.method public hidebysig newslot virtual instance string 'IBase1<class BaseClass1>.Method4'() cil managed noinlining {
.override method instance string class IBase1`1<class BaseClass1>::Method4()
ldstr "MyStruct263::Method4.MI.2019()"
ret
}
.method public hidebysig newslot virtual instance string Method5() cil managed noinlining {
ldstr "MyStruct263::Method5.2020()"
ret
}
.method public hidebysig virtual instance string Method6<M0>() cil managed noinlining {
ldstr "MyStruct263::Method6.2021<"
ldtoken !!M0
call class [mscorlib]System.Type [mscorlib]System.Type::GetTypeFromHandle(valuetype [mscorlib]System.RuntimeTypeHandle)
call string [mscorlib]System.String::Concat(object,object)
ldstr ">()"
call string [mscorlib]System.String::Concat(object,object)
ret
}
.method public hidebysig newslot virtual instance string 'IBase1<class BaseClass1>.Method6'<M0>() cil managed noinlining {
.override method instance string class IBase1`1<class BaseClass1>::Method6<[1]>()
ldstr "MyStruct263::Method6.MI.2022<"
ldtoken !!M0
call class [mscorlib]System.Type [mscorlib]System.Type::GetTypeFromHandle(valuetype [mscorlib]System.RuntimeTypeHandle)
call string [mscorlib]System.String::Concat(object,object)
ldstr ">()"
call string [mscorlib]System.String::Concat(object,object)
ret
}
.method public hidebysig newslot virtual instance string Method0() cil managed noinlining {
ldstr "MyStruct263::Method0.2023()"
ret
}
.method public hidebysig newslot virtual instance string 'IBase0.Method0'() cil managed noinlining {
.override method instance string IBase0::Method0()
ldstr "MyStruct263::Method0.MI.2024()"
ret
}
.method public hidebysig newslot virtual instance string Method1() cil managed noinlining {
ldstr "MyStruct263::Method1.2025()"
ret
}
.method public hidebysig newslot virtual instance string 'IBase0.Method1'() cil managed noinlining {
.override method instance string IBase0::Method1()
ldstr "MyStruct263::Method1.MI.2026()"
ret
}
.method public hidebysig virtual instance string Method2<M0>() cil managed noinlining {
ldstr "MyStruct263::Method2.2027<"
ldtoken !!M0
call class [mscorlib]System.Type [mscorlib]System.Type::GetTypeFromHandle(valuetype [mscorlib]System.RuntimeTypeHandle)
call string [mscorlib]System.String::Concat(object,object)
ldstr ">()"
call string [mscorlib]System.String::Concat(object,object)
ret
}
.method public hidebysig virtual instance string Method3<M0>() cil managed noinlining {
ldstr "MyStruct263::Method3.2028<"
ldtoken !!M0
call class [mscorlib]System.Type [mscorlib]System.Type::GetTypeFromHandle(valuetype [mscorlib]System.RuntimeTypeHandle)
call string [mscorlib]System.String::Concat(object,object)
ldstr ">()"
call string [mscorlib]System.String::Concat(object,object)
ret
}
.method public hidebysig newslot instance string ClassMethod533() cil managed noinlining {
ldstr "MyStruct263::ClassMethod533.2029()"
ret
}
.method public hidebysig virtual instance bool Equals(object obj) cil managed { ldc.i4.0 ret }
.method public hidebysig virtual instance int32 GetHashCode() cil managed { ldc.i4.0 ret }
.method public hidebysig virtual instance string ToString() cil managed { ldstr "" ret }
}
.class interface public abstract IBase1`1<+T0>
{
.method public hidebysig newslot abstract virtual instance string Method4() cil managed { }
.method public hidebysig newslot abstract virtual instance string Method5() cil managed { }
.method public hidebysig newslot abstract virtual instance string Method6<M0>() cil managed { }
}
.class interface public abstract IBase0
{
.method public hidebysig newslot abstract virtual instance string Method0() cil managed { }
.method public hidebysig newslot abstract virtual instance string Method1() cil managed { }
.method public hidebysig newslot abstract virtual instance string Method2<M0>() cil managed { }
.method public hidebysig newslot abstract virtual instance string Method3<M0>() cil managed { }
}
.class public auto ansi beforefieldinit Generated213 {
.method static void M.BaseClass0<(BaseClass0)W>(!!W inst, string exp) cil managed {
.maxstack 5
.locals init (string[] actualResults)
ldc.i4.s 0
newarr string
stloc.s actualResults
ldarg.1
ldstr "M.BaseClass0<(BaseClass0)W>(!!W inst, string exp)"
ldc.i4.s 0
ldloc.s actualResults
call void [TestFramework]TestFramework::MethodCallTest(string,string,int32,string[])
ret
}
.method static void M.BaseClass1<(BaseClass1)W>(!!W inst, string exp) cil managed {
.maxstack 5
.locals init (string[] actualResults)
ldc.i4.s 0
newarr string
stloc.s actualResults
ldarg.1
ldstr "M.BaseClass1<(BaseClass1)W>(!!W inst, string exp)"
ldc.i4.s 0
ldloc.s actualResults
call void [TestFramework]TestFramework::MethodCallTest(string,string,int32,string[])
ret
}
.method static void M.IBase1.T<T0,(class IBase1`1<!!T0>)W>(!!W 'inst', string exp) cil managed {
.maxstack 8
.locals init (string[] actualResults)
ldc.i4.s 3
newarr string
stloc.s actualResults
ldarg.1
ldstr "M.IBase1.T<T0,(class IBase1`1<!!T0>)W>(!!W 'inst', string exp)"
ldc.i4.s 3
ldloc.s actualResults
ldc.i4.s 0
ldarga.s 0
constrained. !!W
callvirt instance string class IBase1`1<!!T0>::Method4()
stelem.ref
ldloc.s actualResults
ldc.i4.s 1
ldarga.s 0
constrained. !!W
callvirt instance string class IBase1`1<!!T0>::Method5()
stelem.ref
ldloc.s actualResults
ldc.i4.s 2
ldarga.s 0
constrained. !!W
callvirt instance string class IBase1`1<!!T0>::Method6<object>()
stelem.ref
ldloc.s actualResults
call void [TestFramework]TestFramework::MethodCallTest(string,string,int32,string[])
ret
}
.method static void M.IBase1.A<(class IBase1`1<class BaseClass0>)W>(!!W 'inst', string exp) cil managed {
.maxstack 8
.locals init (string[] actualResults)
ldc.i4.s 3
newarr string
stloc.s actualResults
ldarg.1
ldstr "M.IBase1.A<(class IBase1`1<class BaseClass0>)W>(!!W 'inst', string exp)"
ldc.i4.s 3
ldloc.s actualResults
ldc.i4.s 0
ldarga.s 0
constrained. !!W
callvirt instance string class IBase1`1<class BaseClass0>::Method4()
stelem.ref
ldloc.s actualResults
ldc.i4.s 1
ldarga.s 0
constrained. !!W
callvirt instance string class IBase1`1<class BaseClass0>::Method5()
stelem.ref
ldloc.s actualResults
ldc.i4.s 2
ldarga.s 0
constrained. !!W
callvirt instance string class IBase1`1<class BaseClass0>::Method6<object>()
stelem.ref
ldloc.s actualResults
call void [TestFramework]TestFramework::MethodCallTest(string,string,int32,string[])
ret
}
.method static void M.IBase1.B<(class IBase1`1<class BaseClass1>)W>(!!W 'inst', string exp) cil managed {
.maxstack 8
.locals init (string[] actualResults)
ldc.i4.s 3
newarr string
stloc.s actualResults
ldarg.1
ldstr "M.IBase1.B<(class IBase1`1<class BaseClass1>)W>(!!W 'inst', string exp)"
ldc.i4.s 3
ldloc.s actualResults
ldc.i4.s 0
ldarga.s 0
constrained. !!W
callvirt instance string class IBase1`1<class BaseClass1>::Method4()
stelem.ref
ldloc.s actualResults
ldc.i4.s 1
ldarga.s 0
constrained. !!W
callvirt instance string class IBase1`1<class BaseClass1>::Method5()
stelem.ref
ldloc.s actualResults
ldc.i4.s 2
ldarga.s 0
constrained. !!W
callvirt instance string class IBase1`1<class BaseClass1>::Method6<object>()
stelem.ref
ldloc.s actualResults
call void [TestFramework]TestFramework::MethodCallTest(string,string,int32,string[])
ret
}
.method static void M.IBase0<(IBase0)W>(!!W inst, string exp) cil managed {
.maxstack 9
.locals init (string[] actualResults)
ldc.i4.s 4
newarr string
stloc.s actualResults
ldarg.1
ldstr "M.IBase0<(IBase0)W>(!!W inst, string exp)"
ldc.i4.s 4
ldloc.s actualResults
ldc.i4.s 0
ldarga.s 0
constrained. !!W
callvirt instance string IBase0::Method0()
stelem.ref
ldloc.s actualResults
ldc.i4.s 1
ldarga.s 0
constrained. !!W
callvirt instance string IBase0::Method1()
stelem.ref
ldloc.s actualResults
ldc.i4.s 2
ldarga.s 0
constrained. !!W
callvirt instance string IBase0::Method2<object>()
stelem.ref
ldloc.s actualResults
ldc.i4.s 3
ldarga.s 0
constrained. !!W
callvirt instance string IBase0::Method3<object>()
stelem.ref
ldloc.s actualResults
call void [TestFramework]TestFramework::MethodCallTest(string,string,int32,string[])
ret
}
.method static void M.MyStruct263.T.T<T0,T1,(valuetype MyStruct263`2<!!T0,!!T1>)W>(!!W 'inst', string exp) cil managed {
.maxstack 12
.locals init (string[] actualResults)
ldc.i4.s 7
newarr string
stloc.s actualResults
ldarg.1
ldstr "M.MyStruct263.T.T<T0,T1,(valuetype MyStruct263`2<!!T0,!!T1>)W>(!!W 'inst', string exp)"
ldc.i4.s 7
ldloc.s actualResults
ldc.i4.s 0
ldarga.s 0
constrained. valuetype MyStruct263`2<!!T0,!!T1>
callvirt instance string class IBase1`1<class BaseClass1>::Method4()
stelem.ref
ldloc.s actualResults
ldc.i4.s 1
ldarga.s 0
constrained. valuetype MyStruct263`2<!!T0,!!T1>
callvirt instance string class IBase1`1<class BaseClass1>::Method5()
stelem.ref
ldloc.s actualResults
ldc.i4.s 2
ldarga.s 0
constrained. valuetype MyStruct263`2<!!T0,!!T1>
callvirt instance string class IBase1`1<class BaseClass1>::Method6<object>()
stelem.ref
ldloc.s actualResults
ldc.i4.s 3
ldarga.s 0
constrained. valuetype MyStruct263`2<!!T0,!!T1>
callvirt instance string IBase0::Method0()
stelem.ref
ldloc.s actualResults
ldc.i4.s 4
ldarga.s 0
constrained. valuetype MyStruct263`2<!!T0,!!T1>
callvirt instance string IBase0::Method1()
stelem.ref
ldloc.s actualResults
ldc.i4.s 5
ldarga.s 0
constrained. valuetype MyStruct263`2<!!T0,!!T1>
callvirt instance string IBase0::Method2<object>()
stelem.ref
ldloc.s actualResults
ldc.i4.s 6
ldarga.s 0
constrained. valuetype MyStruct263`2<!!T0,!!T1>
callvirt instance string IBase0::Method3<object>()
stelem.ref
ldloc.s actualResults
call void [TestFramework]TestFramework::MethodCallTest(string,string,int32,string[])
ret
}
.method static void M.MyStruct263.A.T<T1,(valuetype MyStruct263`2<class BaseClass0,!!T1>)W>(!!W 'inst', string exp) cil managed {
.maxstack 12
.locals init (string[] actualResults)
ldc.i4.s 7
newarr string
stloc.s actualResults
ldarg.1
ldstr "M.MyStruct263.A.T<T1,(valuetype MyStruct263`2<class BaseClass0,!!T1>)W>(!!W 'inst', string exp)"
ldc.i4.s 7
ldloc.s actualResults
ldc.i4.s 0
ldarga.s 0
constrained. valuetype MyStruct263`2<class BaseClass0,!!T1>
callvirt instance string class IBase1`1<class BaseClass1>::Method4()
stelem.ref
ldloc.s actualResults
ldc.i4.s 1
ldarga.s 0
constrained. valuetype MyStruct263`2<class BaseClass0,!!T1>
callvirt instance string class IBase1`1<class BaseClass1>::Method5()
stelem.ref
ldloc.s actualResults
ldc.i4.s 2
ldarga.s 0
constrained. valuetype MyStruct263`2<class BaseClass0,!!T1>
callvirt instance string class IBase1`1<class BaseClass1>::Method6<object>()
stelem.ref
ldloc.s actualResults
ldc.i4.s 3
ldarga.s 0
constrained. valuetype MyStruct263`2<class BaseClass0,!!T1>
callvirt instance string IBase0::Method0()
stelem.ref
ldloc.s actualResults
ldc.i4.s 4
ldarga.s 0
constrained. valuetype MyStruct263`2<class BaseClass0,!!T1>
callvirt instance string IBase0::Method1()
stelem.ref
ldloc.s actualResults
ldc.i4.s 5
ldarga.s 0
constrained. valuetype MyStruct263`2<class BaseClass0,!!T1>
callvirt instance string IBase0::Method2<object>()
stelem.ref
ldloc.s actualResults
ldc.i4.s 6
ldarga.s 0
constrained. valuetype MyStruct263`2<class BaseClass0,!!T1>
callvirt instance string IBase0::Method3<object>()
stelem.ref
ldloc.s actualResults
call void [TestFramework]TestFramework::MethodCallTest(string,string,int32,string[])
ret
}
.method static void M.MyStruct263.A.A<(valuetype MyStruct263`2<class BaseClass0,class BaseClass0>)W>(!!W 'inst', string exp) cil managed {
.maxstack 12
.locals init (string[] actualResults)
ldc.i4.s 7
newarr string
stloc.s actualResults
ldarg.1
ldstr "M.MyStruct263.A.A<(valuetype MyStruct263`2<class BaseClass0,class BaseClass0>)W>(!!W 'inst', string exp)"
ldc.i4.s 7
ldloc.s actualResults
ldc.i4.s 0
ldarga.s 0
constrained. valuetype MyStruct263`2<class BaseClass0,class BaseClass0>
callvirt instance string class IBase1`1<class BaseClass1>::Method4()
stelem.ref
ldloc.s actualResults
ldc.i4.s 1
ldarga.s 0
constrained. valuetype MyStruct263`2<class BaseClass0,class BaseClass0>
callvirt instance string class IBase1`1<class BaseClass1>::Method5()
stelem.ref
ldloc.s actualResults
ldc.i4.s 2
ldarga.s 0
constrained. valuetype MyStruct263`2<class BaseClass0,class BaseClass0>
callvirt instance string class IBase1`1<class BaseClass1>::Method6<object>()
stelem.ref
ldloc.s actualResults
ldc.i4.s 3
ldarga.s 0
constrained. valuetype MyStruct263`2<class BaseClass0,class BaseClass0>
callvirt instance string IBase0::Method0()
stelem.ref
ldloc.s actualResults
ldc.i4.s 4
ldarga.s 0
constrained. valuetype MyStruct263`2<class BaseClass0,class BaseClass0>
callvirt instance string IBase0::Method1()
stelem.ref
ldloc.s actualResults
ldc.i4.s 5
ldarga.s 0
constrained. valuetype MyStruct263`2<class BaseClass0,class BaseClass0>
callvirt instance string IBase0::Method2<object>()
stelem.ref
ldloc.s actualResults
ldc.i4.s 6
ldarga.s 0
constrained. valuetype MyStruct263`2<class BaseClass0,class BaseClass0>
callvirt instance string IBase0::Method3<object>()
stelem.ref
ldloc.s actualResults
call void [TestFramework]TestFramework::MethodCallTest(string,string,int32,string[])
ret
}
.method static void M.MyStruct263.A.B<(valuetype MyStruct263`2<class BaseClass0,class BaseClass1>)W>(!!W 'inst', string exp) cil managed {
.maxstack 12
.locals init (string[] actualResults)
ldc.i4.s 7
newarr string
stloc.s actualResults
ldarg.1
ldstr "M.MyStruct263.A.B<(valuetype MyStruct263`2<class BaseClass0,class BaseClass1>)W>(!!W 'inst', string exp)"
ldc.i4.s 7
ldloc.s actualResults
ldc.i4.s 0
ldarga.s 0
constrained. valuetype MyStruct263`2<class BaseClass0,class BaseClass1>
callvirt instance string class IBase1`1<class BaseClass1>::Method4()
stelem.ref
ldloc.s actualResults
ldc.i4.s 1
ldarga.s 0
constrained. valuetype MyStruct263`2<class BaseClass0,class BaseClass1>
callvirt instance string class IBase1`1<class BaseClass1>::Method5()
stelem.ref
ldloc.s actualResults
ldc.i4.s 2
ldarga.s 0
constrained. valuetype MyStruct263`2<class BaseClass0,class BaseClass1>
callvirt instance string class IBase1`1<class BaseClass1>::Method6<object>()
stelem.ref
ldloc.s actualResults
ldc.i4.s 3
ldarga.s 0
constrained. valuetype MyStruct263`2<class BaseClass0,class BaseClass1>
callvirt instance string IBase0::Method0()
stelem.ref
ldloc.s actualResults
ldc.i4.s 4
ldarga.s 0
constrained. valuetype MyStruct263`2<class BaseClass0,class BaseClass1>
callvirt instance string IBase0::Method1()
stelem.ref
ldloc.s actualResults
ldc.i4.s 5
ldarga.s 0
constrained. valuetype MyStruct263`2<class BaseClass0,class BaseClass1>
callvirt instance string IBase0::Method2<object>()
stelem.ref
ldloc.s actualResults
ldc.i4.s 6
ldarga.s 0
constrained. valuetype MyStruct263`2<class BaseClass0,class BaseClass1>
callvirt instance string IBase0::Method3<object>()
stelem.ref
ldloc.s actualResults
call void [TestFramework]TestFramework::MethodCallTest(string,string,int32,string[])
ret
}
.method static void M.MyStruct263.B.T<T1,(valuetype MyStruct263`2<class BaseClass1,!!T1>)W>(!!W 'inst', string exp) cil managed {
.maxstack 12
.locals init (string[] actualResults)
ldc.i4.s 7
newarr string
stloc.s actualResults
ldarg.1
ldstr "M.MyStruct263.B.T<T1,(valuetype MyStruct263`2<class BaseClass1,!!T1>)W>(!!W 'inst', string exp)"
ldc.i4.s 7
ldloc.s actualResults
ldc.i4.s 0
ldarga.s 0
constrained. valuetype MyStruct263`2<class BaseClass1,!!T1>
callvirt instance string class IBase1`1<class BaseClass1>::Method4()
stelem.ref
ldloc.s actualResults
ldc.i4.s 1
ldarga.s 0
constrained. valuetype MyStruct263`2<class BaseClass1,!!T1>
callvirt instance string class IBase1`1<class BaseClass1>::Method5()
stelem.ref
ldloc.s actualResults
ldc.i4.s 2
ldarga.s 0
constrained. valuetype MyStruct263`2<class BaseClass1,!!T1>
callvirt instance string class IBase1`1<class BaseClass1>::Method6<object>()
stelem.ref
ldloc.s actualResults
ldc.i4.s 3
ldarga.s 0
constrained. valuetype MyStruct263`2<class BaseClass1,!!T1>
callvirt instance string IBase0::Method0()
stelem.ref
ldloc.s actualResults
ldc.i4.s 4
ldarga.s 0
constrained. valuetype MyStruct263`2<class BaseClass1,!!T1>
callvirt instance string IBase0::Method1()
stelem.ref
ldloc.s actualResults
ldc.i4.s 5
ldarga.s 0
constrained. valuetype MyStruct263`2<class BaseClass1,!!T1>
callvirt instance string IBase0::Method2<object>()
stelem.ref
ldloc.s actualResults
ldc.i4.s 6
ldarga.s 0
constrained. valuetype MyStruct263`2<class BaseClass1,!!T1>
callvirt instance string IBase0::Method3<object>()
stelem.ref
ldloc.s actualResults
call void [TestFramework]TestFramework::MethodCallTest(string,string,int32,string[])
ret
}
.method static void M.MyStruct263.B.A<(valuetype MyStruct263`2<class BaseClass1,class BaseClass0>)W>(!!W 'inst', string exp) cil managed {
.maxstack 12
.locals init (string[] actualResults)
ldc.i4.s 7
newarr string
stloc.s actualResults
ldarg.1
ldstr "M.MyStruct263.B.A<(valuetype MyStruct263`2<class BaseClass1,class BaseClass0>)W>(!!W 'inst', string exp)"
ldc.i4.s 7
ldloc.s actualResults
ldc.i4.s 0
ldarga.s 0
constrained. valuetype MyStruct263`2<class BaseClass1,class BaseClass0>
callvirt instance string class IBase1`1<class BaseClass1>::Method4()
stelem.ref
ldloc.s actualResults
ldc.i4.s 1
ldarga.s 0
constrained. valuetype MyStruct263`2<class BaseClass1,class BaseClass0>
callvirt instance string class IBase1`1<class BaseClass1>::Method5()
stelem.ref
ldloc.s actualResults
ldc.i4.s 2
ldarga.s 0
constrained. valuetype MyStruct263`2<class BaseClass1,class BaseClass0>
callvirt instance string class IBase1`1<class BaseClass1>::Method6<object>()
stelem.ref
ldloc.s actualResults
ldc.i4.s 3
ldarga.s 0
constrained. valuetype MyStruct263`2<class BaseClass1,class BaseClass0>
callvirt instance string IBase0::Method0()
stelem.ref
ldloc.s actualResults
ldc.i4.s 4
ldarga.s 0
constrained. valuetype MyStruct263`2<class BaseClass1,class BaseClass0>
callvirt instance string IBase0::Method1()
stelem.ref
ldloc.s actualResults
ldc.i4.s 5
ldarga.s 0
constrained. valuetype MyStruct263`2<class BaseClass1,class BaseClass0>
callvirt instance string IBase0::Method2<object>()
stelem.ref
ldloc.s actualResults
ldc.i4.s 6
ldarga.s 0
constrained. valuetype MyStruct263`2<class BaseClass1,class BaseClass0>
callvirt instance string IBase0::Method3<object>()
stelem.ref
ldloc.s actualResults
call void [TestFramework]TestFramework::MethodCallTest(string,string,int32,string[])
ret
}
.method static void M.MyStruct263.B.B<(valuetype MyStruct263`2<class BaseClass1,class BaseClass1>)W>(!!W 'inst', string exp) cil managed {
.maxstack 12
.locals init (string[] actualResults)
ldc.i4.s 7
newarr string
stloc.s actualResults
ldarg.1
ldstr "M.MyStruct263.B.B<(valuetype MyStruct263`2<class BaseClass1,class BaseClass1>)W>(!!W 'inst', string exp)"
ldc.i4.s 7
ldloc.s actualResults
ldc.i4.s 0
ldarga.s 0
constrained. valuetype MyStruct263`2<class BaseClass1,class BaseClass1>
callvirt instance string class IBase1`1<class BaseClass1>::Method4()
stelem.ref
ldloc.s actualResults
ldc.i4.s 1
ldarga.s 0
constrained. valuetype MyStruct263`2<class BaseClass1,class BaseClass1>
callvirt instance string class IBase1`1<class BaseClass1>::Method5()
stelem.ref
ldloc.s actualResults
ldc.i4.s 2
ldarga.s 0
constrained. valuetype MyStruct263`2<class BaseClass1,class BaseClass1>
callvirt instance string class IBase1`1<class BaseClass1>::Method6<object>()
stelem.ref
ldloc.s actualResults
ldc.i4.s 3
ldarga.s 0
constrained. valuetype MyStruct263`2<class BaseClass1,class BaseClass1>
callvirt instance string IBase0::Method0()
stelem.ref
ldloc.s actualResults
ldc.i4.s 4
ldarga.s 0
constrained. valuetype MyStruct263`2<class BaseClass1,class BaseClass1>
callvirt instance string IBase0::Method1()
stelem.ref
ldloc.s actualResults
ldc.i4.s 5
ldarga.s 0
constrained. valuetype MyStruct263`2<class BaseClass1,class BaseClass1>
callvirt instance string IBase0::Method2<object>()
stelem.ref
ldloc.s actualResults
ldc.i4.s 6
ldarga.s 0
constrained. valuetype MyStruct263`2<class BaseClass1,class BaseClass1>
callvirt instance string IBase0::Method3<object>()
stelem.ref
ldloc.s actualResults
call void [TestFramework]TestFramework::MethodCallTest(string,string,int32,string[])
ret
}
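  // Note on the call pattern used by the M.* helper methods above: the
  // 'constrained. <valuetype>' prefix makes the following callvirt dispatch
  // directly on the value type when the struct itself implements the method
  // (no boxing); if it did not, the runtime would box the value and perform a
  // normal virtual call.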
.method public hidebysig static void MethodCallingTest() cil managed
{
.maxstack 10
.locals init (object V_0)
ldstr "========================== Method Calling Test =========================="
call void [mscorlib]System.Console::WriteLine(string)
.locals init (valuetype MyStruct263`2<class BaseClass0,class BaseClass0> V_1)
ldloca V_1
initobj valuetype MyStruct263`2<class BaseClass0,class BaseClass0>
ldloca V_1
dup
call instance string valuetype MyStruct263`2<class BaseClass0,class BaseClass0>::Method4()
ldstr "MyStruct263::Method4.2018()"
ldstr "valuetype MyStruct263`2<class BaseClass0,class BaseClass0> on type MyStruct263"
call void [TestFramework]TestFramework::MethodCallTest(string,string,string)
dup
call instance string valuetype MyStruct263`2<class BaseClass0,class BaseClass0>::Method5()
ldstr "MyStruct263::Method5.2020()"
ldstr "valuetype MyStruct263`2<class BaseClass0,class BaseClass0> on type MyStruct263"
call void [TestFramework]TestFramework::MethodCallTest(string,string,string)
dup
call instance string valuetype MyStruct263`2<class BaseClass0,class BaseClass0>::Method6<object>()
ldstr "MyStruct263::Method6.2021<System.Object>()"
ldstr "valuetype MyStruct263`2<class BaseClass0,class BaseClass0> on type MyStruct263"
call void [TestFramework]TestFramework::MethodCallTest(string,string,string)
dup
call instance string valuetype MyStruct263`2<class BaseClass0,class BaseClass0>::Method0()
ldstr "MyStruct263::Method0.2023()"
ldstr "valuetype MyStruct263`2<class BaseClass0,class BaseClass0> on type MyStruct263"
call void [TestFramework]TestFramework::MethodCallTest(string,string,string)
dup
call instance string valuetype MyStruct263`2<class BaseClass0,class BaseClass0>::Method1()
ldstr "MyStruct263::Method1.2025()"
ldstr "valuetype MyStruct263`2<class BaseClass0,class BaseClass0> on type MyStruct263"
call void [TestFramework]TestFramework::MethodCallTest(string,string,string)
dup
call instance string valuetype MyStruct263`2<class BaseClass0,class BaseClass0>::Method2<object>()
ldstr "MyStruct263::Method2.2027<System.Object>()"
ldstr "valuetype MyStruct263`2<class BaseClass0,class BaseClass0> on type MyStruct263"
call void [TestFramework]TestFramework::MethodCallTest(string,string,string)
dup
call instance string valuetype MyStruct263`2<class BaseClass0,class BaseClass0>::Method3<object>()
ldstr "MyStruct263::Method3.2028<System.Object>()"
ldstr "valuetype MyStruct263`2<class BaseClass0,class BaseClass0> on type MyStruct263"
call void [TestFramework]TestFramework::MethodCallTest(string,string,string)
dup
call instance string valuetype MyStruct263`2<class BaseClass0,class BaseClass0>::ClassMethod533()
ldstr "MyStruct263::ClassMethod533.2029()"
ldstr "valuetype MyStruct263`2<class BaseClass0,class BaseClass0> on type MyStruct263"
call void [TestFramework]TestFramework::MethodCallTest(string,string,string)
dup ldnull call instance bool valuetype MyStruct263`2<class BaseClass0,class BaseClass0>::Equals(object) pop
dup call instance int32 valuetype MyStruct263`2<class BaseClass0,class BaseClass0>::GetHashCode() pop
dup call instance string valuetype MyStruct263`2<class BaseClass0,class BaseClass0>::ToString() pop
pop
ldloc V_1
box valuetype MyStruct263`2<class BaseClass0,class BaseClass0>
dup
callvirt instance string class IBase1`1<class BaseClass1>::Method4()
ldstr "MyStruct263::Method4.MI.2019()"
ldstr "class IBase1`1<class BaseClass1> on type valuetype MyStruct263`2<class BaseClass0,class BaseClass0>"
call void [TestFramework]TestFramework::MethodCallTest(string,string,string)
dup
callvirt instance string class IBase1`1<class BaseClass1>::Method5()
ldstr "MyStruct263::Method5.2020()"
ldstr "class IBase1`1<class BaseClass1> on type valuetype MyStruct263`2<class BaseClass0,class BaseClass0>"
call void [TestFramework]TestFramework::MethodCallTest(string,string,string)
dup
callvirt instance string class IBase1`1<class BaseClass1>::Method6<object>()
ldstr "MyStruct263::Method6.MI.2022<System.Object>()"
ldstr "class IBase1`1<class BaseClass1> on type valuetype MyStruct263`2<class BaseClass0,class BaseClass0>"
call void [TestFramework]TestFramework::MethodCallTest(string,string,string)
pop
ldloc V_1
box valuetype MyStruct263`2<class BaseClass0,class BaseClass0>
dup
callvirt instance string IBase0::Method0()
ldstr "MyStruct263::Method0.MI.2024()"
ldstr "IBase0 on type valuetype MyStruct263`2<class BaseClass0,class BaseClass0>"
call void [TestFramework]TestFramework::MethodCallTest(string,string,string)
dup
callvirt instance string IBase0::Method1()
ldstr "MyStruct263::Method1.MI.2026()"
ldstr "IBase0 on type valuetype MyStruct263`2<class BaseClass0,class BaseClass0>"
call void [TestFramework]TestFramework::MethodCallTest(string,string,string)
dup
callvirt instance string IBase0::Method2<object>()
ldstr "MyStruct263::Method2.2027<System.Object>()"
ldstr "IBase0 on type valuetype MyStruct263`2<class BaseClass0,class BaseClass0>"
call void [TestFramework]TestFramework::MethodCallTest(string,string,string)
dup
callvirt instance string IBase0::Method3<object>()
ldstr "MyStruct263::Method3.2028<System.Object>()"
ldstr "IBase0 on type valuetype MyStruct263`2<class BaseClass0,class BaseClass0>"
call void [TestFramework]TestFramework::MethodCallTest(string,string,string)
pop
ldloc V_1
box valuetype MyStruct263`2<class BaseClass0,class BaseClass0>
dup
callvirt instance string class IBase1`1<class BaseClass0>::Method4()
ldstr "MyStruct263::Method4.MI.2019()"
ldstr "class IBase1`1<class BaseClass0> on type valuetype MyStruct263`2<class BaseClass0,class BaseClass0>"
call void [TestFramework]TestFramework::MethodCallTest(string,string,string)
dup
callvirt instance string class IBase1`1<class BaseClass0>::Method5()
ldstr "MyStruct263::Method5.2020()"
ldstr "class IBase1`1<class BaseClass0> on type valuetype MyStruct263`2<class BaseClass0,class BaseClass0>"
call void [TestFramework]TestFramework::MethodCallTest(string,string,string)
dup
callvirt instance string class IBase1`1<class BaseClass0>::Method6<object>()
ldstr "MyStruct263::Method6.MI.2022<System.Object>()"
ldstr "class IBase1`1<class BaseClass0> on type valuetype MyStruct263`2<class BaseClass0,class BaseClass0>"
call void [TestFramework]TestFramework::MethodCallTest(string,string,string)
pop
.locals init (valuetype MyStruct263`2<class BaseClass0,class BaseClass1> V_2)
ldloca V_2
initobj valuetype MyStruct263`2<class BaseClass0,class BaseClass1>
ldloca V_2
dup
call instance string valuetype MyStruct263`2<class BaseClass0,class BaseClass1>::Method4()
ldstr "MyStruct263::Method4.2018()"
ldstr "valuetype MyStruct263`2<class BaseClass0,class BaseClass1> on type MyStruct263"
call void [TestFramework]TestFramework::MethodCallTest(string,string,string)
dup
call instance string valuetype MyStruct263`2<class BaseClass0,class BaseClass1>::Method5()
ldstr "MyStruct263::Method5.2020()"
ldstr "valuetype MyStruct263`2<class BaseClass0,class BaseClass1> on type MyStruct263"
call void [TestFramework]TestFramework::MethodCallTest(string,string,string)
dup
call instance string valuetype MyStruct263`2<class BaseClass0,class BaseClass1>::Method6<object>()
ldstr "MyStruct263::Method6.2021<System.Object>()"
ldstr "valuetype MyStruct263`2<class BaseClass0,class BaseClass1> on type MyStruct263"
call void [TestFramework]TestFramework::MethodCallTest(string,string,string)
dup
call instance string valuetype MyStruct263`2<class BaseClass0,class BaseClass1>::Method0()
ldstr "MyStruct263::Method0.2023()"
ldstr "valuetype MyStruct263`2<class BaseClass0,class BaseClass1> on type MyStruct263"
call void [TestFramework]TestFramework::MethodCallTest(string,string,string)
dup
call instance string valuetype MyStruct263`2<class BaseClass0,class BaseClass1>::Method1()
ldstr "MyStruct263::Method1.2025()"
ldstr "valuetype MyStruct263`2<class BaseClass0,class BaseClass1> on type MyStruct263"
call void [TestFramework]TestFramework::MethodCallTest(string,string,string)
dup
call instance string valuetype MyStruct263`2<class BaseClass0,class BaseClass1>::Method2<object>()
ldstr "MyStruct263::Method2.2027<System.Object>()"
ldstr "valuetype MyStruct263`2<class BaseClass0,class BaseClass1> on type MyStruct263"
call void [TestFramework]TestFramework::MethodCallTest(string,string,string)
dup
call instance string valuetype MyStruct263`2<class BaseClass0,class BaseClass1>::Method3<object>()
ldstr "MyStruct263::Method3.2028<System.Object>()"
ldstr "valuetype MyStruct263`2<class BaseClass0,class BaseClass1> on type MyStruct263"
call void [TestFramework]TestFramework::MethodCallTest(string,string,string)
dup
call instance string valuetype MyStruct263`2<class BaseClass0,class BaseClass1>::ClassMethod533()
ldstr "MyStruct263::ClassMethod533.2029()"
ldstr "valuetype MyStruct263`2<class BaseClass0,class BaseClass1> on type MyStruct263"
call void [TestFramework]TestFramework::MethodCallTest(string,string,string)
dup ldnull call instance bool valuetype MyStruct263`2<class BaseClass0,class BaseClass1>::Equals(object) pop
dup call instance int32 valuetype MyStruct263`2<class BaseClass0,class BaseClass1>::GetHashCode() pop
dup call instance string valuetype MyStruct263`2<class BaseClass0,class BaseClass1>::ToString() pop
pop
ldloc V_2
box valuetype MyStruct263`2<class BaseClass0,class BaseClass1>
dup
callvirt instance string class IBase1`1<class BaseClass1>::Method4()
ldstr "MyStruct263::Method4.MI.2019()"
ldstr "class IBase1`1<class BaseClass1> on type valuetype MyStruct263`2<class BaseClass0,class BaseClass1>"
call void [TestFramework]TestFramework::MethodCallTest(string,string,string)
dup
callvirt instance string class IBase1`1<class BaseClass1>::Method5()
ldstr "MyStruct263::Method5.2020()"
ldstr "class IBase1`1<class BaseClass1> on type valuetype MyStruct263`2<class BaseClass0,class BaseClass1>"
call void [TestFramework]TestFramework::MethodCallTest(string,string,string)
dup
callvirt instance string class IBase1`1<class BaseClass1>::Method6<object>()
ldstr "MyStruct263::Method6.MI.2022<System.Object>()"
ldstr "class IBase1`1<class BaseClass1> on type valuetype MyStruct263`2<class BaseClass0,class BaseClass1>"
call void [TestFramework]TestFramework::MethodCallTest(string,string,string)
pop
ldloc V_2
box valuetype MyStruct263`2<class BaseClass0,class BaseClass1>
dup
callvirt instance string IBase0::Method0()
ldstr "MyStruct263::Method0.MI.2024()"
ldstr "IBase0 on type valuetype MyStruct263`2<class BaseClass0,class BaseClass1>"
call void [TestFramework]TestFramework::MethodCallTest(string,string,string)
dup
callvirt instance string IBase0::Method1()
ldstr "MyStruct263::Method1.MI.2026()"
ldstr "IBase0 on type valuetype MyStruct263`2<class BaseClass0,class BaseClass1>"
call void [TestFramework]TestFramework::MethodCallTest(string,string,string)
dup
callvirt instance string IBase0::Method2<object>()
ldstr "MyStruct263::Method2.2027<System.Object>()"
ldstr "IBase0 on type valuetype MyStruct263`2<class BaseClass0,class BaseClass1>"
call void [TestFramework]TestFramework::MethodCallTest(string,string,string)
dup
callvirt instance string IBase0::Method3<object>()
ldstr "MyStruct263::Method3.2028<System.Object>()"
ldstr "IBase0 on type valuetype MyStruct263`2<class BaseClass0,class BaseClass1>"
call void [TestFramework]TestFramework::MethodCallTest(string,string,string)
pop
ldloc V_2
box valuetype MyStruct263`2<class BaseClass0,class BaseClass1>
dup
callvirt instance string class IBase1`1<class BaseClass0>::Method4()
ldstr "MyStruct263::Method4.MI.2019()"
ldstr "class IBase1`1<class BaseClass0> on type valuetype MyStruct263`2<class BaseClass0,class BaseClass1>"
call void [TestFramework]TestFramework::MethodCallTest(string,string,string)
dup
callvirt instance string class IBase1`1<class BaseClass0>::Method5()
ldstr "MyStruct263::Method5.2020()"
ldstr "class IBase1`1<class BaseClass0> on type valuetype MyStruct263`2<class BaseClass0,class BaseClass1>"
call void [TestFramework]TestFramework::MethodCallTest(string,string,string)
dup
callvirt instance string class IBase1`1<class BaseClass0>::Method6<object>()
ldstr "MyStruct263::Method6.MI.2022<System.Object>()"
ldstr "class IBase1`1<class BaseClass0> on type valuetype MyStruct263`2<class BaseClass0,class BaseClass1>"
call void [TestFramework]TestFramework::MethodCallTest(string,string,string)
pop
.locals init (valuetype MyStruct263`2<class BaseClass1,class BaseClass0> V_3)
ldloca V_3
initobj valuetype MyStruct263`2<class BaseClass1,class BaseClass0>
ldloca V_3
dup
call instance string valuetype MyStruct263`2<class BaseClass1,class BaseClass0>::Method4()
ldstr "MyStruct263::Method4.2018()"
ldstr "valuetype MyStruct263`2<class BaseClass1,class BaseClass0> on type MyStruct263"
call void [TestFramework]TestFramework::MethodCallTest(string,string,string)
dup
call instance string valuetype MyStruct263`2<class BaseClass1,class BaseClass0>::Method5()
ldstr "MyStruct263::Method5.2020()"
ldstr "valuetype MyStruct263`2<class BaseClass1,class BaseClass0> on type MyStruct263"
call void [TestFramework]TestFramework::MethodCallTest(string,string,string)
dup
call instance string valuetype MyStruct263`2<class BaseClass1,class BaseClass0>::Method6<object>()
ldstr "MyStruct263::Method6.2021<System.Object>()"
ldstr "valuetype MyStruct263`2<class BaseClass1,class BaseClass0> on type MyStruct263"
call void [TestFramework]TestFramework::MethodCallTest(string,string,string)
dup
call instance string valuetype MyStruct263`2<class BaseClass1,class BaseClass0>::Method0()
ldstr "MyStruct263::Method0.2023()"
ldstr "valuetype MyStruct263`2<class BaseClass1,class BaseClass0> on type MyStruct263"
call void [TestFramework]TestFramework::MethodCallTest(string,string,string)
dup
call instance string valuetype MyStruct263`2<class BaseClass1,class BaseClass0>::Method1()
ldstr "MyStruct263::Method1.2025()"
ldstr "valuetype MyStruct263`2<class BaseClass1,class BaseClass0> on type MyStruct263"
call void [TestFramework]TestFramework::MethodCallTest(string,string,string)
dup
call instance string valuetype MyStruct263`2<class BaseClass1,class BaseClass0>::Method2<object>()
ldstr "MyStruct263::Method2.2027<System.Object>()"
ldstr "valuetype MyStruct263`2<class BaseClass1,class BaseClass0> on type MyStruct263"
call void [TestFramework]TestFramework::MethodCallTest(string,string,string)
dup
call instance string valuetype MyStruct263`2<class BaseClass1,class BaseClass0>::Method3<object>()
ldstr "MyStruct263::Method3.2028<System.Object>()"
ldstr "valuetype MyStruct263`2<class BaseClass1,class BaseClass0> on type MyStruct263"
call void [TestFramework]TestFramework::MethodCallTest(string,string,string)
dup
call instance string valuetype MyStruct263`2<class BaseClass1,class BaseClass0>::ClassMethod533()
ldstr "MyStruct263::ClassMethod533.2029()"
ldstr "valuetype MyStruct263`2<class BaseClass1,class BaseClass0> on type MyStruct263"
call void [TestFramework]TestFramework::MethodCallTest(string,string,string)
dup ldnull call instance bool valuetype MyStruct263`2<class BaseClass1,class BaseClass0>::Equals(object) pop
dup call instance int32 valuetype MyStruct263`2<class BaseClass1,class BaseClass0>::GetHashCode() pop
dup call instance string valuetype MyStruct263`2<class BaseClass1,class BaseClass0>::ToString() pop
pop
ldloc V_3
box valuetype MyStruct263`2<class BaseClass1,class BaseClass0>
dup
callvirt instance string class IBase1`1<class BaseClass1>::Method4()
ldstr "MyStruct263::Method4.MI.2019()"
ldstr "class IBase1`1<class BaseClass1> on type valuetype MyStruct263`2<class BaseClass1,class BaseClass0>"
call void [TestFramework]TestFramework::MethodCallTest(string,string,string)
dup
callvirt instance string class IBase1`1<class BaseClass1>::Method5()
ldstr "MyStruct263::Method5.2020()"
ldstr "class IBase1`1<class BaseClass1> on type valuetype MyStruct263`2<class BaseClass1,class BaseClass0>"
call void [TestFramework]TestFramework::MethodCallTest(string,string,string)
dup
callvirt instance string class IBase1`1<class BaseClass1>::Method6<object>()
ldstr "MyStruct263::Method6.MI.2022<System.Object>()"
ldstr "class IBase1`1<class BaseClass1> on type valuetype MyStruct263`2<class BaseClass1,class BaseClass0>"
call void [TestFramework]TestFramework::MethodCallTest(string,string,string)
pop
ldloc V_3
box valuetype MyStruct263`2<class BaseClass1,class BaseClass0>
dup
callvirt instance string IBase0::Method0()
ldstr "MyStruct263::Method0.MI.2024()"
ldstr "IBase0 on type valuetype MyStruct263`2<class BaseClass1,class BaseClass0>"
call void [TestFramework]TestFramework::MethodCallTest(string,string,string)
dup
callvirt instance string IBase0::Method1()
ldstr "MyStruct263::Method1.MI.2026()"
ldstr "IBase0 on type valuetype MyStruct263`2<class BaseClass1,class BaseClass0>"
call void [TestFramework]TestFramework::MethodCallTest(string,string,string)
dup
callvirt instance string IBase0::Method2<object>()
ldstr "MyStruct263::Method2.2027<System.Object>()"
ldstr "IBase0 on type valuetype MyStruct263`2<class BaseClass1,class BaseClass0>"
call void [TestFramework]TestFramework::MethodCallTest(string,string,string)
dup
callvirt instance string IBase0::Method3<object>()
ldstr "MyStruct263::Method3.2028<System.Object>()"
ldstr "IBase0 on type valuetype MyStruct263`2<class BaseClass1,class BaseClass0>"
call void [TestFramework]TestFramework::MethodCallTest(string,string,string)
pop
ldloc V_3
box valuetype MyStruct263`2<class BaseClass1,class BaseClass0>
dup
callvirt instance string class IBase1`1<class BaseClass0>::Method4()
ldstr "MyStruct263::Method4.MI.2019()"
ldstr "class IBase1`1<class BaseClass0> on type valuetype MyStruct263`2<class BaseClass1,class BaseClass0>"
call void [TestFramework]TestFramework::MethodCallTest(string,string,string)
dup
callvirt instance string class IBase1`1<class BaseClass0>::Method5()
ldstr "MyStruct263::Method5.2020()"
ldstr "class IBase1`1<class BaseClass0> on type valuetype MyStruct263`2<class BaseClass1,class BaseClass0>"
call void [TestFramework]TestFramework::MethodCallTest(string,string,string)
dup
callvirt instance string class IBase1`1<class BaseClass0>::Method6<object>()
ldstr "MyStruct263::Method6.MI.2022<System.Object>()"
ldstr "class IBase1`1<class BaseClass0> on type valuetype MyStruct263`2<class BaseClass1,class BaseClass0>"
call void [TestFramework]TestFramework::MethodCallTest(string,string,string)
pop
.locals init (valuetype MyStruct263`2<class BaseClass1,class BaseClass1> V_4)
ldloca V_4
initobj valuetype MyStruct263`2<class BaseClass1,class BaseClass1>
ldloca V_4
dup
call instance string valuetype MyStruct263`2<class BaseClass1,class BaseClass1>::Method4()
ldstr "MyStruct263::Method4.2018()"
ldstr "valuetype MyStruct263`2<class BaseClass1,class BaseClass1> on type MyStruct263"
call void [TestFramework]TestFramework::MethodCallTest(string,string,string)
dup
call instance string valuetype MyStruct263`2<class BaseClass1,class BaseClass1>::Method5()
ldstr "MyStruct263::Method5.2020()"
ldstr "valuetype MyStruct263`2<class BaseClass1,class BaseClass1> on type MyStruct263"
call void [TestFramework]TestFramework::MethodCallTest(string,string,string)
dup
call instance string valuetype MyStruct263`2<class BaseClass1,class BaseClass1>::Method6<object>()
ldstr "MyStruct263::Method6.2021<System.Object>()"
ldstr "valuetype MyStruct263`2<class BaseClass1,class BaseClass1> on type MyStruct263"
call void [TestFramework]TestFramework::MethodCallTest(string,string,string)
dup
call instance string valuetype MyStruct263`2<class BaseClass1,class BaseClass1>::Method0()
ldstr "MyStruct263::Method0.2023()"
ldstr "valuetype MyStruct263`2<class BaseClass1,class BaseClass1> on type MyStruct263"
call void [TestFramework]TestFramework::MethodCallTest(string,string,string)
dup
call instance string valuetype MyStruct263`2<class BaseClass1,class BaseClass1>::Method1()
ldstr "MyStruct263::Method1.2025()"
ldstr "valuetype MyStruct263`2<class BaseClass1,class BaseClass1> on type MyStruct263"
call void [TestFramework]TestFramework::MethodCallTest(string,string,string)
dup
call instance string valuetype MyStruct263`2<class BaseClass1,class BaseClass1>::Method2<object>()
ldstr "MyStruct263::Method2.2027<System.Object>()"
ldstr "valuetype MyStruct263`2<class BaseClass1,class BaseClass1> on type MyStruct263"
call void [TestFramework]TestFramework::MethodCallTest(string,string,string)
dup
call instance string valuetype MyStruct263`2<class BaseClass1,class BaseClass1>::Method3<object>()
ldstr "MyStruct263::Method3.2028<System.Object>()"
ldstr "valuetype MyStruct263`2<class BaseClass1,class BaseClass1> on type MyStruct263"
call void [TestFramework]TestFramework::MethodCallTest(string,string,string)
dup
call instance string valuetype MyStruct263`2<class BaseClass1,class BaseClass1>::ClassMethod533()
ldstr "MyStruct263::ClassMethod533.2029()"
ldstr "valuetype MyStruct263`2<class BaseClass1,class BaseClass1> on type MyStruct263"
call void [TestFramework]TestFramework::MethodCallTest(string,string,string)
dup ldnull call instance bool valuetype MyStruct263`2<class BaseClass1,class BaseClass1>::Equals(object) pop
dup call instance int32 valuetype MyStruct263`2<class BaseClass1,class BaseClass1>::GetHashCode() pop
dup call instance string valuetype MyStruct263`2<class BaseClass1,class BaseClass1>::ToString() pop
pop
ldloc V_4
box valuetype MyStruct263`2<class BaseClass1,class BaseClass1>
dup
callvirt instance string class IBase1`1<class BaseClass1>::Method4()
ldstr "MyStruct263::Method4.MI.2019()"
ldstr "class IBase1`1<class BaseClass1> on type valuetype MyStruct263`2<class BaseClass1,class BaseClass1>"
call void [TestFramework]TestFramework::MethodCallTest(string,string,string)
dup
callvirt instance string class IBase1`1<class BaseClass1>::Method5()
ldstr "MyStruct263::Method5.2020()"
ldstr "class IBase1`1<class BaseClass1> on type valuetype MyStruct263`2<class BaseClass1,class BaseClass1>"
call void [TestFramework]TestFramework::MethodCallTest(string,string,string)
dup
callvirt instance string class IBase1`1<class BaseClass1>::Method6<object>()
ldstr "MyStruct263::Method6.MI.2022<System.Object>()"
ldstr "class IBase1`1<class BaseClass1> on type valuetype MyStruct263`2<class BaseClass1,class BaseClass1>"
call void [TestFramework]TestFramework::MethodCallTest(string,string,string)
pop
ldloc V_4
box valuetype MyStruct263`2<class BaseClass1,class BaseClass1>
dup
callvirt instance string IBase0::Method0()
ldstr "MyStruct263::Method0.MI.2024()"
ldstr "IBase0 on type valuetype MyStruct263`2<class BaseClass1,class BaseClass1>"
call void [TestFramework]TestFramework::MethodCallTest(string,string,string)
dup
callvirt instance string IBase0::Method1()
ldstr "MyStruct263::Method1.MI.2026()"
ldstr "IBase0 on type valuetype MyStruct263`2<class BaseClass1,class BaseClass1>"
call void [TestFramework]TestFramework::MethodCallTest(string,string,string)
dup
callvirt instance string IBase0::Method2<object>()
ldstr "MyStruct263::Method2.2027<System.Object>()"
ldstr "IBase0 on type valuetype MyStruct263`2<class BaseClass1,class BaseClass1>"
call void [TestFramework]TestFramework::MethodCallTest(string,string,string)
dup
callvirt instance string IBase0::Method3<object>()
ldstr "MyStruct263::Method3.2028<System.Object>()"
ldstr "IBase0 on type valuetype MyStruct263`2<class BaseClass1,class BaseClass1>"
call void [TestFramework]TestFramework::MethodCallTest(string,string,string)
pop
ldloc V_4
box valuetype MyStruct263`2<class BaseClass1,class BaseClass1>
dup
callvirt instance string class IBase1`1<class BaseClass0>::Method4()
ldstr "MyStruct263::Method4.MI.2019()"
ldstr "class IBase1`1<class BaseClass0> on type valuetype MyStruct263`2<class BaseClass1,class BaseClass1>"
call void [TestFramework]TestFramework::MethodCallTest(string,string,string)
dup
callvirt instance string class IBase1`1<class BaseClass0>::Method5()
ldstr "MyStruct263::Method5.2020()"
ldstr "class IBase1`1<class BaseClass0> on type valuetype MyStruct263`2<class BaseClass1,class BaseClass1>"
call void [TestFramework]TestFramework::MethodCallTest(string,string,string)
dup
callvirt instance string class IBase1`1<class BaseClass0>::Method6<object>()
ldstr "MyStruct263::Method6.MI.2022<System.Object>()"
ldstr "class IBase1`1<class BaseClass0> on type valuetype MyStruct263`2<class BaseClass1,class BaseClass1>"
call void [TestFramework]TestFramework::MethodCallTest(string,string,string)
pop
ldstr "========================================================================\n\n"
call void [mscorlib]System.Console::WriteLine(string)
ret
}
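  // Note (added commentary): ConstrainedCallsTest below boxes each MyStruct263 instantiation and
  // routes it through the Generated213::M.* generic helpers, which presumably dispatch via
  // constrained. callvirt; any System.Security.VerificationException is swallowed so that
  // unverifiable combinations do not abort the remaining checks.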
.method public hidebysig static void ConstrainedCallsTest() cil managed
{
.maxstack 10
.locals init (object V_0)
ldstr "========================== Constrained Calls Test =========================="
call void [mscorlib]System.Console::WriteLine(string)
.locals init (valuetype MyStruct263`2<class BaseClass0,class BaseClass0> V_5)
ldloca V_5
initobj valuetype MyStruct263`2<class BaseClass0,class BaseClass0>
.try { ldloc V_5
ldstr "MyStruct263::Method4.MI.2019()#MyStruct263::Method5.2020()#MyStruct263::Method6.MI.2022<System.Object>()#"
call void Generated213::M.IBase1.T<class BaseClass1,valuetype MyStruct263`2<class BaseClass0,class BaseClass0>>(!!1,string) leave.s LV0
} catch [mscorlib]System.Security.VerificationException { pop leave.s LV0} LV0:
.try { ldloc V_5
ldstr "MyStruct263::Method4.MI.2019()#MyStruct263::Method5.2020()#MyStruct263::Method6.MI.2022<System.Object>()#"
call void Generated213::M.IBase1.B<valuetype MyStruct263`2<class BaseClass0,class BaseClass0>>(!!0,string) leave.s LV1
} catch [mscorlib]System.Security.VerificationException { pop leave.s LV1} LV1:
.try { ldloc V_5
ldstr "MyStruct263::Method0.MI.2024()#MyStruct263::Method1.MI.2026()#MyStruct263::Method2.2027<System.Object>()#MyStruct263::Method3.2028<System.Object>()#"
call void Generated213::M.IBase0<valuetype MyStruct263`2<class BaseClass0,class BaseClass0>>(!!0,string) leave.s LV2
} catch [mscorlib]System.Security.VerificationException { pop leave.s LV2} LV2:
.try { ldloc V_5
ldstr "MyStruct263::Method4.MI.2019()#MyStruct263::Method5.2020()#MyStruct263::Method6.MI.2022<System.Object>()#"
call void Generated213::M.IBase1.T<class BaseClass0,valuetype MyStruct263`2<class BaseClass0,class BaseClass0>>(!!1,string) leave.s LV3
} catch [mscorlib]System.Security.VerificationException { pop leave.s LV3} LV3:
.try { ldloc V_5
ldstr "MyStruct263::Method4.MI.2019()#MyStruct263::Method5.2020()#MyStruct263::Method6.MI.2022<System.Object>()#"
call void Generated213::M.IBase1.A<valuetype MyStruct263`2<class BaseClass0,class BaseClass0>>(!!0,string) leave.s LV4
} catch [mscorlib]System.Security.VerificationException { pop leave.s LV4} LV4:
.locals init (valuetype MyStruct263`2<class BaseClass0,class BaseClass1> V_6)
ldloca V_6
initobj valuetype MyStruct263`2<class BaseClass0,class BaseClass1>
.try { ldloc V_6
ldstr "MyStruct263::Method4.MI.2019()#MyStruct263::Method5.2020()#MyStruct263::Method6.MI.2022<System.Object>()#"
call void Generated213::M.IBase1.T<class BaseClass1,valuetype MyStruct263`2<class BaseClass0,class BaseClass1>>(!!1,string) leave.s LV5
} catch [mscorlib]System.Security.VerificationException { pop leave.s LV5} LV5:
.try { ldloc V_6
ldstr "MyStruct263::Method4.MI.2019()#MyStruct263::Method5.2020()#MyStruct263::Method6.MI.2022<System.Object>()#"
call void Generated213::M.IBase1.B<valuetype MyStruct263`2<class BaseClass0,class BaseClass1>>(!!0,string) leave.s LV6
} catch [mscorlib]System.Security.VerificationException { pop leave.s LV6} LV6:
.try { ldloc V_6
ldstr "MyStruct263::Method0.MI.2024()#MyStruct263::Method1.MI.2026()#MyStruct263::Method2.2027<System.Object>()#MyStruct263::Method3.2028<System.Object>()#"
call void Generated213::M.IBase0<valuetype MyStruct263`2<class BaseClass0,class BaseClass1>>(!!0,string) leave.s LV7
} catch [mscorlib]System.Security.VerificationException { pop leave.s LV7} LV7:
.try { ldloc V_6
ldstr "MyStruct263::Method4.MI.2019()#MyStruct263::Method5.2020()#MyStruct263::Method6.MI.2022<System.Object>()#"
call void Generated213::M.IBase1.T<class BaseClass0,valuetype MyStruct263`2<class BaseClass0,class BaseClass1>>(!!1,string) leave.s LV8
} catch [mscorlib]System.Security.VerificationException { pop leave.s LV8} LV8:
.try { ldloc V_6
ldstr "MyStruct263::Method4.MI.2019()#MyStruct263::Method5.2020()#MyStruct263::Method6.MI.2022<System.Object>()#"
call void Generated213::M.IBase1.A<valuetype MyStruct263`2<class BaseClass0,class BaseClass1>>(!!0,string) leave.s LV9
} catch [mscorlib]System.Security.VerificationException { pop leave.s LV9} LV9:
.locals init (valuetype MyStruct263`2<class BaseClass1,class BaseClass0> V_7)
ldloca V_7
initobj valuetype MyStruct263`2<class BaseClass1,class BaseClass0>
.try { ldloc V_7
ldstr "MyStruct263::Method4.MI.2019()#MyStruct263::Method5.2020()#MyStruct263::Method6.MI.2022<System.Object>()#"
call void Generated213::M.IBase1.T<class BaseClass1,valuetype MyStruct263`2<class BaseClass1,class BaseClass0>>(!!1,string) leave.s LV10
} catch [mscorlib]System.Security.VerificationException { pop leave.s LV10} LV10:
.try { ldloc V_7
ldstr "MyStruct263::Method4.MI.2019()#MyStruct263::Method5.2020()#MyStruct263::Method6.MI.2022<System.Object>()#"
call void Generated213::M.IBase1.B<valuetype MyStruct263`2<class BaseClass1,class BaseClass0>>(!!0,string) leave.s LV11
} catch [mscorlib]System.Security.VerificationException { pop leave.s LV11} LV11:
.try { ldloc V_7
ldstr "MyStruct263::Method0.MI.2024()#MyStruct263::Method1.MI.2026()#MyStruct263::Method2.2027<System.Object>()#MyStruct263::Method3.2028<System.Object>()#"
call void Generated213::M.IBase0<valuetype MyStruct263`2<class BaseClass1,class BaseClass0>>(!!0,string) leave.s LV12
} catch [mscorlib]System.Security.VerificationException { pop leave.s LV12} LV12:
.try { ldloc V_7
ldstr "MyStruct263::Method4.MI.2019()#MyStruct263::Method5.2020()#MyStruct263::Method6.MI.2022<System.Object>()#"
call void Generated213::M.IBase1.T<class BaseClass0,valuetype MyStruct263`2<class BaseClass1,class BaseClass0>>(!!1,string) leave.s LV13
} catch [mscorlib]System.Security.VerificationException { pop leave.s LV13} LV13:
.try { ldloc V_7
ldstr "MyStruct263::Method4.MI.2019()#MyStruct263::Method5.2020()#MyStruct263::Method6.MI.2022<System.Object>()#"
call void Generated213::M.IBase1.A<valuetype MyStruct263`2<class BaseClass1,class BaseClass0>>(!!0,string) leave.s LV14
} catch [mscorlib]System.Security.VerificationException { pop leave.s LV14} LV14:
.locals init (valuetype MyStruct263`2<class BaseClass1,class BaseClass1> V_8)
ldloca V_8
initobj valuetype MyStruct263`2<class BaseClass1,class BaseClass1>
.try { ldloc V_8
ldstr "MyStruct263::Method4.MI.2019()#MyStruct263::Method5.2020()#MyStruct263::Method6.MI.2022<System.Object>()#"
call void Generated213::M.IBase1.T<class BaseClass1,valuetype MyStruct263`2<class BaseClass1,class BaseClass1>>(!!1,string) leave.s LV15
} catch [mscorlib]System.Security.VerificationException { pop leave.s LV15} LV15:
.try { ldloc V_8
ldstr "MyStruct263::Method4.MI.2019()#MyStruct263::Method5.2020()#MyStruct263::Method6.MI.2022<System.Object>()#"
call void Generated213::M.IBase1.B<valuetype MyStruct263`2<class BaseClass1,class BaseClass1>>(!!0,string) leave.s LV16
} catch [mscorlib]System.Security.VerificationException { pop leave.s LV16} LV16:
.try { ldloc V_8
ldstr "MyStruct263::Method0.MI.2024()#MyStruct263::Method1.MI.2026()#MyStruct263::Method2.2027<System.Object>()#MyStruct263::Method3.2028<System.Object>()#"
call void Generated213::M.IBase0<valuetype MyStruct263`2<class BaseClass1,class BaseClass1>>(!!0,string) leave.s LV17
} catch [mscorlib]System.Security.VerificationException { pop leave.s LV17} LV17:
.try { ldloc V_8
ldstr "MyStruct263::Method4.MI.2019()#MyStruct263::Method5.2020()#MyStruct263::Method6.MI.2022<System.Object>()#"
call void Generated213::M.IBase1.T<class BaseClass0,valuetype MyStruct263`2<class BaseClass1,class BaseClass1>>(!!1,string) leave.s LV18
} catch [mscorlib]System.Security.VerificationException { pop leave.s LV18} LV18:
.try { ldloc V_8
ldstr "MyStruct263::Method4.MI.2019()#MyStruct263::Method5.2020()#MyStruct263::Method6.MI.2022<System.Object>()#"
call void Generated213::M.IBase1.A<valuetype MyStruct263`2<class BaseClass1,class BaseClass1>>(!!0,string) leave.s LV19
} catch [mscorlib]System.Security.VerificationException { pop leave.s LV19} LV19:
ldstr "========================================================================\n\n"
call void [mscorlib]System.Console::WriteLine(string)
ret
}
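  // Note (added commentary): StructConstrainedInterfaceCallsTest repeats the dispatch checks through
  // the M.MyStruct263.* helpers, which are presumably generic methods constrained to the struct type
  // itself, comparing the '#'-separated concatenation of results against the expected method identifiers.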
.method public hidebysig static void StructConstrainedInterfaceCallsTest() cil managed
{
.maxstack 10
ldstr "===================== Struct Constrained Interface Calls Test ====================="
call void [mscorlib]System.Console::WriteLine(string)
.locals init (valuetype MyStruct263`2<class BaseClass0,class BaseClass0> V_9)
ldloca V_9
initobj valuetype MyStruct263`2<class BaseClass0,class BaseClass0>
.try { ldloc V_9
ldstr "MyStruct263::Method4.MI.2019()#" +
"MyStruct263::Method5.2020()#" +
"MyStruct263::Method6.MI.2022<System.Object>()#" +
"MyStruct263::Method0.MI.2024()#" +
"MyStruct263::Method1.MI.2026()#" +
"MyStruct263::Method2.2027<System.Object>()#" +
"MyStruct263::Method3.2028<System.Object>()#"
call void Generated213::M.MyStruct263.T.T<class BaseClass0,class BaseClass0,valuetype MyStruct263`2<class BaseClass0,class BaseClass0>>(!!2,string) leave.s LV0
} catch [mscorlib]System.Security.VerificationException { pop leave.s LV0} LV0:
.try { ldloc V_9
ldstr "MyStruct263::Method4.MI.2019()#" +
"MyStruct263::Method5.2020()#" +
"MyStruct263::Method6.MI.2022<System.Object>()#" +
"MyStruct263::Method0.MI.2024()#" +
"MyStruct263::Method1.MI.2026()#" +
"MyStruct263::Method2.2027<System.Object>()#" +
"MyStruct263::Method3.2028<System.Object>()#"
call void Generated213::M.MyStruct263.A.T<class BaseClass0,valuetype MyStruct263`2<class BaseClass0,class BaseClass0>>(!!1,string) leave.s LV1
} catch [mscorlib]System.Security.VerificationException { pop leave.s LV1} LV1:
.try { ldloc V_9
ldstr "MyStruct263::Method4.MI.2019()#" +
"MyStruct263::Method5.2020()#" +
"MyStruct263::Method6.MI.2022<System.Object>()#" +
"MyStruct263::Method0.MI.2024()#" +
"MyStruct263::Method1.MI.2026()#" +
"MyStruct263::Method2.2027<System.Object>()#" +
"MyStruct263::Method3.2028<System.Object>()#"
call void Generated213::M.MyStruct263.A.A<valuetype MyStruct263`2<class BaseClass0,class BaseClass0>>(!!0,string) leave.s LV2
} catch [mscorlib]System.Security.VerificationException { pop leave.s LV2} LV2:
.locals init (valuetype MyStruct263`2<class BaseClass0,class BaseClass1> V_10)
ldloca V_10
initobj valuetype MyStruct263`2<class BaseClass0,class BaseClass1>
.try { ldloc V_10
ldstr "MyStruct263::Method4.MI.2019()#" +
"MyStruct263::Method5.2020()#" +
"MyStruct263::Method6.MI.2022<System.Object>()#" +
"MyStruct263::Method0.MI.2024()#" +
"MyStruct263::Method1.MI.2026()#" +
"MyStruct263::Method2.2027<System.Object>()#" +
"MyStruct263::Method3.2028<System.Object>()#"
call void Generated213::M.MyStruct263.T.T<class BaseClass0,class BaseClass1,valuetype MyStruct263`2<class BaseClass0,class BaseClass1>>(!!2,string) leave.s LV3
} catch [mscorlib]System.Security.VerificationException { pop leave.s LV3} LV3:
.try { ldloc V_10
ldstr "MyStruct263::Method4.MI.2019()#" +
"MyStruct263::Method5.2020()#" +
"MyStruct263::Method6.MI.2022<System.Object>()#" +
"MyStruct263::Method0.MI.2024()#" +
"MyStruct263::Method1.MI.2026()#" +
"MyStruct263::Method2.2027<System.Object>()#" +
"MyStruct263::Method3.2028<System.Object>()#"
call void Generated213::M.MyStruct263.A.T<class BaseClass1,valuetype MyStruct263`2<class BaseClass0,class BaseClass1>>(!!1,string) leave.s LV4
} catch [mscorlib]System.Security.VerificationException { pop leave.s LV4} LV4:
.try { ldloc V_10
ldstr "MyStruct263::Method4.MI.2019()#" +
"MyStruct263::Method5.2020()#" +
"MyStruct263::Method6.MI.2022<System.Object>()#" +
"MyStruct263::Method0.MI.2024()#" +
"MyStruct263::Method1.MI.2026()#" +
"MyStruct263::Method2.2027<System.Object>()#" +
"MyStruct263::Method3.2028<System.Object>()#"
call void Generated213::M.MyStruct263.A.B<valuetype MyStruct263`2<class BaseClass0,class BaseClass1>>(!!0,string) leave.s LV5
} catch [mscorlib]System.Security.VerificationException { pop leave.s LV5} LV5:
.locals init (valuetype MyStruct263`2<class BaseClass1,class BaseClass0> V_11)
ldloca V_11
initobj valuetype MyStruct263`2<class BaseClass1,class BaseClass0>
.try { ldloc V_11
ldstr "MyStruct263::Method4.MI.2019()#" +
"MyStruct263::Method5.2020()#" +
"MyStruct263::Method6.MI.2022<System.Object>()#" +
"MyStruct263::Method0.MI.2024()#" +
"MyStruct263::Method1.MI.2026()#" +
"MyStruct263::Method2.2027<System.Object>()#" +
"MyStruct263::Method3.2028<System.Object>()#"
call void Generated213::M.MyStruct263.T.T<class BaseClass1,class BaseClass0,valuetype MyStruct263`2<class BaseClass1,class BaseClass0>>(!!2,string) leave.s LV6
} catch [mscorlib]System.Security.VerificationException { pop leave.s LV6} LV6:
.try { ldloc V_11
ldstr "MyStruct263::Method4.MI.2019()#" +
"MyStruct263::Method5.2020()#" +
"MyStruct263::Method6.MI.2022<System.Object>()#" +
"MyStruct263::Method0.MI.2024()#" +
"MyStruct263::Method1.MI.2026()#" +
"MyStruct263::Method2.2027<System.Object>()#" +
"MyStruct263::Method3.2028<System.Object>()#"
call void Generated213::M.MyStruct263.B.T<class BaseClass0,valuetype MyStruct263`2<class BaseClass1,class BaseClass0>>(!!1,string) leave.s LV7
} catch [mscorlib]System.Security.VerificationException { pop leave.s LV7} LV7:
.try { ldloc V_11
ldstr "MyStruct263::Method4.MI.2019()#" +
"MyStruct263::Method5.2020()#" +
"MyStruct263::Method6.MI.2022<System.Object>()#" +
"MyStruct263::Method0.MI.2024()#" +
"MyStruct263::Method1.MI.2026()#" +
"MyStruct263::Method2.2027<System.Object>()#" +
"MyStruct263::Method3.2028<System.Object>()#"
call void Generated213::M.MyStruct263.B.A<valuetype MyStruct263`2<class BaseClass1,class BaseClass0>>(!!0,string) leave.s LV8
} catch [mscorlib]System.Security.VerificationException { pop leave.s LV8} LV8:
.locals init (valuetype MyStruct263`2<class BaseClass1,class BaseClass1> V_12)
ldloca V_12
initobj valuetype MyStruct263`2<class BaseClass1,class BaseClass1>
.try { ldloc V_12
ldstr "MyStruct263::Method4.MI.2019()#" +
"MyStruct263::Method5.2020()#" +
"MyStruct263::Method6.MI.2022<System.Object>()#" +
"MyStruct263::Method0.MI.2024()#" +
"MyStruct263::Method1.MI.2026()#" +
"MyStruct263::Method2.2027<System.Object>()#" +
"MyStruct263::Method3.2028<System.Object>()#"
call void Generated213::M.MyStruct263.T.T<class BaseClass1,class BaseClass1,valuetype MyStruct263`2<class BaseClass1,class BaseClass1>>(!!2,string) leave.s LV9
} catch [mscorlib]System.Security.VerificationException { pop leave.s LV9} LV9:
.try { ldloc V_12
ldstr "MyStruct263::Method4.MI.2019()#" +
"MyStruct263::Method5.2020()#" +
"MyStruct263::Method6.MI.2022<System.Object>()#" +
"MyStruct263::Method0.MI.2024()#" +
"MyStruct263::Method1.MI.2026()#" +
"MyStruct263::Method2.2027<System.Object>()#" +
"MyStruct263::Method3.2028<System.Object>()#"
call void Generated213::M.MyStruct263.B.T<class BaseClass1,valuetype MyStruct263`2<class BaseClass1,class BaseClass1>>(!!1,string) leave.s LV10
} catch [mscorlib]System.Security.VerificationException { pop leave.s LV10} LV10:
.try { ldloc V_12
ldstr "MyStruct263::Method4.MI.2019()#" +
"MyStruct263::Method5.2020()#" +
"MyStruct263::Method6.MI.2022<System.Object>()#" +
"MyStruct263::Method0.MI.2024()#" +
"MyStruct263::Method1.MI.2026()#" +
"MyStruct263::Method2.2027<System.Object>()#" +
"MyStruct263::Method3.2028<System.Object>()#"
call void Generated213::M.MyStruct263.B.B<valuetype MyStruct263`2<class BaseClass1,class BaseClass1>>(!!0,string) leave.s LV11
} catch [mscorlib]System.Security.VerificationException { pop leave.s LV11} LV11:
ldstr "========================================================================\n\n"
call void [mscorlib]System.Console::WriteLine(string)
ret
}
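  // Note (added commentary): CalliTest resolves each virtual and interface slot with ldvirtftn on the
  // boxed struct and invokes it through calli, verifying that indirect calls select the same overrides
  // as the direct callvirt paths exercised earlier.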
.method public hidebysig static void CalliTest() cil managed
{
.maxstack 10
.locals init (object V_0)
ldstr "========================== Method Calli Test =========================="
call void [mscorlib]System.Console::WriteLine(string)
.locals init (valuetype MyStruct263`2<class BaseClass0,class BaseClass0> V_13)
ldloca V_13
initobj valuetype MyStruct263`2<class BaseClass0,class BaseClass0>
ldloc V_13
box valuetype MyStruct263`2<class BaseClass0,class BaseClass0>
ldloc V_13
box valuetype MyStruct263`2<class BaseClass0,class BaseClass0>
ldvirtftn instance string valuetype MyStruct263`2<class BaseClass0,class BaseClass0>::Method4()
calli default string(object)
ldstr "MyStruct263::Method4.2018()"
ldstr "valuetype MyStruct263`2<class BaseClass0,class BaseClass0> on type valuetype MyStruct263`2<class BaseClass0,class BaseClass0>"
call void [TestFramework]TestFramework::MethodCallTest(string,string,string)
ldloc V_13
box valuetype MyStruct263`2<class BaseClass0,class BaseClass0>
ldloc V_13
box valuetype MyStruct263`2<class BaseClass0,class BaseClass0>
ldvirtftn instance string valuetype MyStruct263`2<class BaseClass0,class BaseClass0>::Method5()
calli default string(object)
ldstr "MyStruct263::Method5.2020()"
ldstr "valuetype MyStruct263`2<class BaseClass0,class BaseClass0> on type valuetype MyStruct263`2<class BaseClass0,class BaseClass0>"
call void [TestFramework]TestFramework::MethodCallTest(string,string,string)
ldloc V_13
box valuetype MyStruct263`2<class BaseClass0,class BaseClass0>
ldloc V_13
box valuetype MyStruct263`2<class BaseClass0,class BaseClass0>
ldvirtftn instance string valuetype MyStruct263`2<class BaseClass0,class BaseClass0>::Method6<object>()
calli default string(object)
ldstr "MyStruct263::Method6.2021<System.Object>()"
ldstr "valuetype MyStruct263`2<class BaseClass0,class BaseClass0> on type valuetype MyStruct263`2<class BaseClass0,class BaseClass0>"
call void [TestFramework]TestFramework::MethodCallTest(string,string,string)
ldloc V_13
box valuetype MyStruct263`2<class BaseClass0,class BaseClass0>
ldloc V_13
box valuetype MyStruct263`2<class BaseClass0,class BaseClass0>
ldvirtftn instance string valuetype MyStruct263`2<class BaseClass0,class BaseClass0>::Method0()
calli default string(object)
ldstr "MyStruct263::Method0.2023()"
ldstr "valuetype MyStruct263`2<class BaseClass0,class BaseClass0> on type valuetype MyStruct263`2<class BaseClass0,class BaseClass0>"
call void [TestFramework]TestFramework::MethodCallTest(string,string,string)
ldloc V_13
box valuetype MyStruct263`2<class BaseClass0,class BaseClass0>
ldloc V_13
box valuetype MyStruct263`2<class BaseClass0,class BaseClass0>
ldvirtftn instance string valuetype MyStruct263`2<class BaseClass0,class BaseClass0>::Method1()
calli default string(object)
ldstr "MyStruct263::Method1.2025()"
ldstr "valuetype MyStruct263`2<class BaseClass0,class BaseClass0> on type valuetype MyStruct263`2<class BaseClass0,class BaseClass0>"
call void [TestFramework]TestFramework::MethodCallTest(string,string,string)
ldloc V_13
box valuetype MyStruct263`2<class BaseClass0,class BaseClass0>
ldloc V_13
box valuetype MyStruct263`2<class BaseClass0,class BaseClass0>
ldvirtftn instance string valuetype MyStruct263`2<class BaseClass0,class BaseClass0>::Method2<object>()
calli default string(object)
ldstr "MyStruct263::Method2.2027<System.Object>()"
ldstr "valuetype MyStruct263`2<class BaseClass0,class BaseClass0> on type valuetype MyStruct263`2<class BaseClass0,class BaseClass0>"
call void [TestFramework]TestFramework::MethodCallTest(string,string,string)
ldloc V_13
box valuetype MyStruct263`2<class BaseClass0,class BaseClass0>
ldloc V_13
box valuetype MyStruct263`2<class BaseClass0,class BaseClass0>
ldvirtftn instance string valuetype MyStruct263`2<class BaseClass0,class BaseClass0>::Method3<object>()
calli default string(object)
ldstr "MyStruct263::Method3.2028<System.Object>()"
ldstr "valuetype MyStruct263`2<class BaseClass0,class BaseClass0> on type valuetype MyStruct263`2<class BaseClass0,class BaseClass0>"
call void [TestFramework]TestFramework::MethodCallTest(string,string,string)
ldloc V_13
box valuetype MyStruct263`2<class BaseClass0,class BaseClass0>
ldloc V_13
box valuetype MyStruct263`2<class BaseClass0,class BaseClass0>
ldvirtftn instance string valuetype MyStruct263`2<class BaseClass0,class BaseClass0>::ClassMethod533()
calli default string(object)
ldstr "MyStruct263::ClassMethod533.2029()"
ldstr "valuetype MyStruct263`2<class BaseClass0,class BaseClass0> on type valuetype MyStruct263`2<class BaseClass0,class BaseClass0>"
call void [TestFramework]TestFramework::MethodCallTest(string,string,string)
ldloc V_13 box valuetype MyStruct263`2<class BaseClass0,class BaseClass0> ldnull
ldloc V_13 box valuetype MyStruct263`2<class BaseClass0,class BaseClass0>
ldvirtftn instance bool valuetype MyStruct263`2<class BaseClass0,class BaseClass0>::Equals(object) calli default bool(object,object) pop
ldloc V_13 box valuetype MyStruct263`2<class BaseClass0,class BaseClass0>
ldloc V_13 box valuetype MyStruct263`2<class BaseClass0,class BaseClass0>
ldvirtftn instance int32 valuetype MyStruct263`2<class BaseClass0,class BaseClass0>::GetHashCode() calli default int32(object) pop
ldloc V_13 box valuetype MyStruct263`2<class BaseClass0,class BaseClass0>
ldloc V_13 box valuetype MyStruct263`2<class BaseClass0,class BaseClass0>
ldvirtftn instance string valuetype MyStruct263`2<class BaseClass0,class BaseClass0>::ToString() calli default string(object) pop
ldloc V_13
box valuetype MyStruct263`2<class BaseClass0,class BaseClass0>
ldloc V_13
box valuetype MyStruct263`2<class BaseClass0,class BaseClass0>
ldvirtftn instance string class IBase1`1<class BaseClass1>::Method4()
calli default string(object)
ldstr "MyStruct263::Method4.MI.2019()"
ldstr "class IBase1`1<class BaseClass1> on type valuetype MyStruct263`2<class BaseClass0,class BaseClass0>"
call void [TestFramework]TestFramework::MethodCallTest(string,string,string)
ldloc V_13
box valuetype MyStruct263`2<class BaseClass0,class BaseClass0>
ldloc V_13
box valuetype MyStruct263`2<class BaseClass0,class BaseClass0>
ldvirtftn instance string class IBase1`1<class BaseClass1>::Method5()
calli default string(object)
ldstr "MyStruct263::Method5.2020()"
ldstr "class IBase1`1<class BaseClass1> on type valuetype MyStruct263`2<class BaseClass0,class BaseClass0>"
call void [TestFramework]TestFramework::MethodCallTest(string,string,string)
ldloc V_13
box valuetype MyStruct263`2<class BaseClass0,class BaseClass0>
ldloc V_13
box valuetype MyStruct263`2<class BaseClass0,class BaseClass0>
ldvirtftn instance string class IBase1`1<class BaseClass1>::Method6<object>()
calli default string(object)
ldstr "MyStruct263::Method6.MI.2022<System.Object>()"
ldstr "class IBase1`1<class BaseClass1> on type valuetype MyStruct263`2<class BaseClass0,class BaseClass0>"
call void [TestFramework]TestFramework::MethodCallTest(string,string,string)
ldloc V_13
box valuetype MyStruct263`2<class BaseClass0,class BaseClass0>
ldloc V_13
box valuetype MyStruct263`2<class BaseClass0,class BaseClass0>
ldvirtftn instance string IBase0::Method0()
calli default string(object)
ldstr "MyStruct263::Method0.MI.2024()"
ldstr "IBase0 on type valuetype MyStruct263`2<class BaseClass0,class BaseClass0>"
call void [TestFramework]TestFramework::MethodCallTest(string,string,string)
ldloc V_13
box valuetype MyStruct263`2<class BaseClass0,class BaseClass0>
ldloc V_13
box valuetype MyStruct263`2<class BaseClass0,class BaseClass0>
ldvirtftn instance string IBase0::Method1()
calli default string(object)
ldstr "MyStruct263::Method1.MI.2026()"
ldstr "IBase0 on type valuetype MyStruct263`2<class BaseClass0,class BaseClass0>"
call void [TestFramework]TestFramework::MethodCallTest(string,string,string)
ldloc V_13
box valuetype MyStruct263`2<class BaseClass0,class BaseClass0>
ldloc V_13
box valuetype MyStruct263`2<class BaseClass0,class BaseClass0>
ldvirtftn instance string IBase0::Method2<object>()
calli default string(object)
ldstr "MyStruct263::Method2.2027<System.Object>()"
ldstr "IBase0 on type valuetype MyStruct263`2<class BaseClass0,class BaseClass0>"
call void [TestFramework]TestFramework::MethodCallTest(string,string,string)
ldloc V_13
box valuetype MyStruct263`2<class BaseClass0,class BaseClass0>
ldloc V_13
box valuetype MyStruct263`2<class BaseClass0,class BaseClass0>
ldvirtftn instance string IBase0::Method3<object>()
calli default string(object)
ldstr "MyStruct263::Method3.2028<System.Object>()"
ldstr "IBase0 on type valuetype MyStruct263`2<class BaseClass0,class BaseClass0>"
call void [TestFramework]TestFramework::MethodCallTest(string,string,string)
ldloc V_13
box valuetype MyStruct263`2<class BaseClass0,class BaseClass0>
ldloc V_13
box valuetype MyStruct263`2<class BaseClass0,class BaseClass0>
ldvirtftn instance string class IBase1`1<class BaseClass0>::Method4()
calli default string(object)
ldstr "MyStruct263::Method4.MI.2019()"
ldstr "class IBase1`1<class BaseClass0> on type valuetype MyStruct263`2<class BaseClass0,class BaseClass0>"
call void [TestFramework]TestFramework::MethodCallTest(string,string,string)
ldloc V_13
box valuetype MyStruct263`2<class BaseClass0,class BaseClass0>
ldloc V_13
box valuetype MyStruct263`2<class BaseClass0,class BaseClass0>
ldvirtftn instance string class IBase1`1<class BaseClass0>::Method5()
calli default string(object)
ldstr "MyStruct263::Method5.2020()"
ldstr "class IBase1`1<class BaseClass0> on type valuetype MyStruct263`2<class BaseClass0,class BaseClass0>"
call void [TestFramework]TestFramework::MethodCallTest(string,string,string)
ldloc V_13
box valuetype MyStruct263`2<class BaseClass0,class BaseClass0>
ldloc V_13
box valuetype MyStruct263`2<class BaseClass0,class BaseClass0>
ldvirtftn instance string class IBase1`1<class BaseClass0>::Method6<object>()
calli default string(object)
ldstr "MyStruct263::Method6.MI.2022<System.Object>()"
ldstr "class IBase1`1<class BaseClass0> on type valuetype MyStruct263`2<class BaseClass0,class BaseClass0>"
call void [TestFramework]TestFramework::MethodCallTest(string,string,string)
.locals init (valuetype MyStruct263`2<class BaseClass0,class BaseClass1> V_14)
ldloca V_14
initobj valuetype MyStruct263`2<class BaseClass0,class BaseClass1>
ldloc V_14
box valuetype MyStruct263`2<class BaseClass0,class BaseClass1>
ldloc V_14
box valuetype MyStruct263`2<class BaseClass0,class BaseClass1>
ldvirtftn instance string valuetype MyStruct263`2<class BaseClass0,class BaseClass1>::Method4()
calli default string(object)
ldstr "MyStruct263::Method4.2018()"
ldstr "valuetype MyStruct263`2<class BaseClass0,class BaseClass1> on type valuetype MyStruct263`2<class BaseClass0,class BaseClass1>"
call void [TestFramework]TestFramework::MethodCallTest(string,string,string)
ldloc V_14
box valuetype MyStruct263`2<class BaseClass0,class BaseClass1>
ldloc V_14
box valuetype MyStruct263`2<class BaseClass0,class BaseClass1>
ldvirtftn instance string valuetype MyStruct263`2<class BaseClass0,class BaseClass1>::Method5()
calli default string(object)
ldstr "MyStruct263::Method5.2020()"
ldstr "valuetype MyStruct263`2<class BaseClass0,class BaseClass1> on type valuetype MyStruct263`2<class BaseClass0,class BaseClass1>"
call void [TestFramework]TestFramework::MethodCallTest(string,string,string)
ldloc V_14
box valuetype MyStruct263`2<class BaseClass0,class BaseClass1>
ldloc V_14
box valuetype MyStruct263`2<class BaseClass0,class BaseClass1>
ldvirtftn instance string valuetype MyStruct263`2<class BaseClass0,class BaseClass1>::Method6<object>()
calli default string(object)
ldstr "MyStruct263::Method6.2021<System.Object>()"
ldstr "valuetype MyStruct263`2<class BaseClass0,class BaseClass1> on type valuetype MyStruct263`2<class BaseClass0,class BaseClass1>"
call void [TestFramework]TestFramework::MethodCallTest(string,string,string)
ldloc V_14
box valuetype MyStruct263`2<class BaseClass0,class BaseClass1>
ldloc V_14
box valuetype MyStruct263`2<class BaseClass0,class BaseClass1>
ldvirtftn instance string valuetype MyStruct263`2<class BaseClass0,class BaseClass1>::Method0()
calli default string(object)
ldstr "MyStruct263::Method0.2023()"
ldstr "valuetype MyStruct263`2<class BaseClass0,class BaseClass1> on type valuetype MyStruct263`2<class BaseClass0,class BaseClass1>"
call void [TestFramework]TestFramework::MethodCallTest(string,string,string)
ldloc V_14
box valuetype MyStruct263`2<class BaseClass0,class BaseClass1>
ldloc V_14
box valuetype MyStruct263`2<class BaseClass0,class BaseClass1>
ldvirtftn instance string valuetype MyStruct263`2<class BaseClass0,class BaseClass1>::Method1()
calli default string(object)
ldstr "MyStruct263::Method1.2025()"
ldstr "valuetype MyStruct263`2<class BaseClass0,class BaseClass1> on type valuetype MyStruct263`2<class BaseClass0,class BaseClass1>"
call void [TestFramework]TestFramework::MethodCallTest(string,string,string)
ldloc V_14
box valuetype MyStruct263`2<class BaseClass0,class BaseClass1>
ldloc V_14
box valuetype MyStruct263`2<class BaseClass0,class BaseClass1>
ldvirtftn instance string valuetype MyStruct263`2<class BaseClass0,class BaseClass1>::Method2<object>()
calli default string(object)
ldstr "MyStruct263::Method2.2027<System.Object>()"
ldstr "valuetype MyStruct263`2<class BaseClass0,class BaseClass1> on type valuetype MyStruct263`2<class BaseClass0,class BaseClass1>"
call void [TestFramework]TestFramework::MethodCallTest(string,string,string)
ldloc V_14
box valuetype MyStruct263`2<class BaseClass0,class BaseClass1>
ldloc V_14
box valuetype MyStruct263`2<class BaseClass0,class BaseClass1>
ldvirtftn instance string valuetype MyStruct263`2<class BaseClass0,class BaseClass1>::Method3<object>()
calli default string(object)
ldstr "MyStruct263::Method3.2028<System.Object>()"
ldstr "valuetype MyStruct263`2<class BaseClass0,class BaseClass1> on type valuetype MyStruct263`2<class BaseClass0,class BaseClass1>"
call void [TestFramework]TestFramework::MethodCallTest(string,string,string)
ldloc V_14
box valuetype MyStruct263`2<class BaseClass0,class BaseClass1>
ldloc V_14
box valuetype MyStruct263`2<class BaseClass0,class BaseClass1>
ldvirtftn instance string valuetype MyStruct263`2<class BaseClass0,class BaseClass1>::ClassMethod533()
calli default string(object)
ldstr "MyStruct263::ClassMethod533.2029()"
ldstr "valuetype MyStruct263`2<class BaseClass0,class BaseClass1> on type valuetype MyStruct263`2<class BaseClass0,class BaseClass1>"
call void [TestFramework]TestFramework::MethodCallTest(string,string,string)
ldloc V_14 box valuetype MyStruct263`2<class BaseClass0,class BaseClass1> ldnull
ldloc V_14 box valuetype MyStruct263`2<class BaseClass0,class BaseClass1>
ldvirtftn instance bool valuetype MyStruct263`2<class BaseClass0,class BaseClass1>::Equals(object) calli default bool(object,object) pop
ldloc V_14 box valuetype MyStruct263`2<class BaseClass0,class BaseClass1>
ldloc V_14 box valuetype MyStruct263`2<class BaseClass0,class BaseClass1>
ldvirtftn instance int32 valuetype MyStruct263`2<class BaseClass0,class BaseClass1>::GetHashCode() calli default int32(object) pop
ldloc V_14 box valuetype MyStruct263`2<class BaseClass0,class BaseClass1>
ldloc V_14 box valuetype MyStruct263`2<class BaseClass0,class BaseClass1>
ldvirtftn instance string valuetype MyStruct263`2<class BaseClass0,class BaseClass1>::ToString() calli default string(object) pop
ldloc V_14
box valuetype MyStruct263`2<class BaseClass0,class BaseClass1>
ldloc V_14
box valuetype MyStruct263`2<class BaseClass0,class BaseClass1>
ldvirtftn instance string class IBase1`1<class BaseClass1>::Method4()
calli default string(object)
ldstr "MyStruct263::Method4.MI.2019()"
ldstr "class IBase1`1<class BaseClass1> on type valuetype MyStruct263`2<class BaseClass0,class BaseClass1>"
call void [TestFramework]TestFramework::MethodCallTest(string,string,string)
ldloc V_14
box valuetype MyStruct263`2<class BaseClass0,class BaseClass1>
ldloc V_14
box valuetype MyStruct263`2<class BaseClass0,class BaseClass1>
ldvirtftn instance string class IBase1`1<class BaseClass1>::Method5()
calli default string(object)
ldstr "MyStruct263::Method5.2020()"
ldstr "class IBase1`1<class BaseClass1> on type valuetype MyStruct263`2<class BaseClass0,class BaseClass1>"
call void [TestFramework]TestFramework::MethodCallTest(string,string,string)
ldloc V_14
box valuetype MyStruct263`2<class BaseClass0,class BaseClass1>
ldloc V_14
box valuetype MyStruct263`2<class BaseClass0,class BaseClass1>
ldvirtftn instance string class IBase1`1<class BaseClass1>::Method6<object>()
calli default string(object)
ldstr "MyStruct263::Method6.MI.2022<System.Object>()"
ldstr "class IBase1`1<class BaseClass1> on type valuetype MyStruct263`2<class BaseClass0,class BaseClass1>"
call void [TestFramework]TestFramework::MethodCallTest(string,string,string)
ldloc V_14
box valuetype MyStruct263`2<class BaseClass0,class BaseClass1>
ldloc V_14
box valuetype MyStruct263`2<class BaseClass0,class BaseClass1>
ldvirtftn instance string IBase0::Method0()
calli default string(object)
ldstr "MyStruct263::Method0.MI.2024()"
ldstr "IBase0 on type valuetype MyStruct263`2<class BaseClass0,class BaseClass1>"
call void [TestFramework]TestFramework::MethodCallTest(string,string,string)
ldloc V_14
box valuetype MyStruct263`2<class BaseClass0,class BaseClass1>
ldloc V_14
box valuetype MyStruct263`2<class BaseClass0,class BaseClass1>
ldvirtftn instance string IBase0::Method1()
calli default string(object)
ldstr "MyStruct263::Method1.MI.2026()"
ldstr "IBase0 on type valuetype MyStruct263`2<class BaseClass0,class BaseClass1>"
call void [TestFramework]TestFramework::MethodCallTest(string,string,string)
ldloc V_14
box valuetype MyStruct263`2<class BaseClass0,class BaseClass1>
ldloc V_14
box valuetype MyStruct263`2<class BaseClass0,class BaseClass1>
ldvirtftn instance string IBase0::Method2<object>()
calli default string(object)
ldstr "MyStruct263::Method2.2027<System.Object>()"
ldstr "IBase0 on type valuetype MyStruct263`2<class BaseClass0,class BaseClass1>"
call void [TestFramework]TestFramework::MethodCallTest(string,string,string)
ldloc V_14
box valuetype MyStruct263`2<class BaseClass0,class BaseClass1>
ldloc V_14
box valuetype MyStruct263`2<class BaseClass0,class BaseClass1>
ldvirtftn instance string IBase0::Method3<object>()
calli default string(object)
ldstr "MyStruct263::Method3.2028<System.Object>()"
ldstr "IBase0 on type valuetype MyStruct263`2<class BaseClass0,class BaseClass1>"
call void [TestFramework]TestFramework::MethodCallTest(string,string,string)
ldloc V_14
box valuetype MyStruct263`2<class BaseClass0,class BaseClass1>
ldloc V_14
box valuetype MyStruct263`2<class BaseClass0,class BaseClass1>
ldvirtftn instance string class IBase1`1<class BaseClass0>::Method4()
calli default string(object)
ldstr "MyStruct263::Method4.MI.2019()"
ldstr "class IBase1`1<class BaseClass0> on type valuetype MyStruct263`2<class BaseClass0,class BaseClass1>"
call void [TestFramework]TestFramework::MethodCallTest(string,string,string)
ldloc V_14
box valuetype MyStruct263`2<class BaseClass0,class BaseClass1>
ldloc V_14
box valuetype MyStruct263`2<class BaseClass0,class BaseClass1>
ldvirtftn instance string class IBase1`1<class BaseClass0>::Method5()
calli default string(object)
ldstr "MyStruct263::Method5.2020()"
ldstr "class IBase1`1<class BaseClass0> on type valuetype MyStruct263`2<class BaseClass0,class BaseClass1>"
call void [TestFramework]TestFramework::MethodCallTest(string,string,string)
ldloc V_14
box valuetype MyStruct263`2<class BaseClass0,class BaseClass1>
ldloc V_14
box valuetype MyStruct263`2<class BaseClass0,class BaseClass1>
ldvirtftn instance string class IBase1`1<class BaseClass0>::Method6<object>()
calli default string(object)
ldstr "MyStruct263::Method6.MI.2022<System.Object>()"
ldstr "class IBase1`1<class BaseClass0> on type valuetype MyStruct263`2<class BaseClass0,class BaseClass1>"
call void [TestFramework]TestFramework::MethodCallTest(string,string,string)
.locals init (valuetype MyStruct263`2<class BaseClass1,class BaseClass0> V_15)
ldloca V_15
initobj valuetype MyStruct263`2<class BaseClass1,class BaseClass0>
ldloc V_15
box valuetype MyStruct263`2<class BaseClass1,class BaseClass0>
ldloc V_15
box valuetype MyStruct263`2<class BaseClass1,class BaseClass0>
ldvirtftn instance string valuetype MyStruct263`2<class BaseClass1,class BaseClass0>::Method4()
calli default string(object)
ldstr "MyStruct263::Method4.2018()"
ldstr "valuetype MyStruct263`2<class BaseClass1,class BaseClass0> on type valuetype MyStruct263`2<class BaseClass1,class BaseClass0>"
call void [TestFramework]TestFramework::MethodCallTest(string,string,string)
ldloc V_15
box valuetype MyStruct263`2<class BaseClass1,class BaseClass0>
ldloc V_15
box valuetype MyStruct263`2<class BaseClass1,class BaseClass0>
ldvirtftn instance string valuetype MyStruct263`2<class BaseClass1,class BaseClass0>::Method5()
calli default string(object)
ldstr "MyStruct263::Method5.2020()"
ldstr "valuetype MyStruct263`2<class BaseClass1,class BaseClass0> on type valuetype MyStruct263`2<class BaseClass1,class BaseClass0>"
call void [TestFramework]TestFramework::MethodCallTest(string,string,string)
ldloc V_15
box valuetype MyStruct263`2<class BaseClass1,class BaseClass0>
ldloc V_15
box valuetype MyStruct263`2<class BaseClass1,class BaseClass0>
ldvirtftn instance string valuetype MyStruct263`2<class BaseClass1,class BaseClass0>::Method6<object>()
calli default string(object)
ldstr "MyStruct263::Method6.2021<System.Object>()"
ldstr "valuetype MyStruct263`2<class BaseClass1,class BaseClass0> on type valuetype MyStruct263`2<class BaseClass1,class BaseClass0>"
call void [TestFramework]TestFramework::MethodCallTest(string,string,string)
ldloc V_15
box valuetype MyStruct263`2<class BaseClass1,class BaseClass0>
ldloc V_15
box valuetype MyStruct263`2<class BaseClass1,class BaseClass0>
ldvirtftn instance string valuetype MyStruct263`2<class BaseClass1,class BaseClass0>::Method0()
calli default string(object)
ldstr "MyStruct263::Method0.2023()"
ldstr "valuetype MyStruct263`2<class BaseClass1,class BaseClass0> on type valuetype MyStruct263`2<class BaseClass1,class BaseClass0>"
call void [TestFramework]TestFramework::MethodCallTest(string,string,string)
ldloc V_15
box valuetype MyStruct263`2<class BaseClass1,class BaseClass0>
ldloc V_15
box valuetype MyStruct263`2<class BaseClass1,class BaseClass0>
ldvirtftn instance string valuetype MyStruct263`2<class BaseClass1,class BaseClass0>::Method1()
calli default string(object)
ldstr "MyStruct263::Method1.2025()"
ldstr "valuetype MyStruct263`2<class BaseClass1,class BaseClass0> on type valuetype MyStruct263`2<class BaseClass1,class BaseClass0>"
call void [TestFramework]TestFramework::MethodCallTest(string,string,string)
ldloc V_15
box valuetype MyStruct263`2<class BaseClass1,class BaseClass0>
ldloc V_15
box valuetype MyStruct263`2<class BaseClass1,class BaseClass0>
ldvirtftn instance string valuetype MyStruct263`2<class BaseClass1,class BaseClass0>::Method2<object>()
calli default string(object)
ldstr "MyStruct263::Method2.2027<System.Object>()"
ldstr "valuetype MyStruct263`2<class BaseClass1,class BaseClass0> on type valuetype MyStruct263`2<class BaseClass1,class BaseClass0>"
call void [TestFramework]TestFramework::MethodCallTest(string,string,string)
ldloc V_15
box valuetype MyStruct263`2<class BaseClass1,class BaseClass0>
ldloc V_15
box valuetype MyStruct263`2<class BaseClass1,class BaseClass0>
ldvirtftn instance string valuetype MyStruct263`2<class BaseClass1,class BaseClass0>::Method3<object>()
calli default string(object)
ldstr "MyStruct263::Method3.2028<System.Object>()"
ldstr "valuetype MyStruct263`2<class BaseClass1,class BaseClass0> on type valuetype MyStruct263`2<class BaseClass1,class BaseClass0>"
call void [TestFramework]TestFramework::MethodCallTest(string,string,string)
ldloc V_15
box valuetype MyStruct263`2<class BaseClass1,class BaseClass0>
ldloc V_15
box valuetype MyStruct263`2<class BaseClass1,class BaseClass0>
ldvirtftn instance string valuetype MyStruct263`2<class BaseClass1,class BaseClass0>::ClassMethod533()
calli default string(object)
ldstr "MyStruct263::ClassMethod533.2029()"
ldstr "valuetype MyStruct263`2<class BaseClass1,class BaseClass0> on type valuetype MyStruct263`2<class BaseClass1,class BaseClass0>"
call void [TestFramework]TestFramework::MethodCallTest(string,string,string)
ldloc V_15 box valuetype MyStruct263`2<class BaseClass1,class BaseClass0> ldnull
ldloc V_15 box valuetype MyStruct263`2<class BaseClass1,class BaseClass0>
ldvirtftn instance bool valuetype MyStruct263`2<class BaseClass1,class BaseClass0>::Equals(object) calli default bool(object,object) pop
ldloc V_15 box valuetype MyStruct263`2<class BaseClass1,class BaseClass0>
ldloc V_15 box valuetype MyStruct263`2<class BaseClass1,class BaseClass0>
ldvirtftn instance int32 valuetype MyStruct263`2<class BaseClass1,class BaseClass0>::GetHashCode() calli default int32(object) pop
ldloc V_15 box valuetype MyStruct263`2<class BaseClass1,class BaseClass0>
ldloc V_15 box valuetype MyStruct263`2<class BaseClass1,class BaseClass0>
ldvirtftn instance string valuetype MyStruct263`2<class BaseClass1,class BaseClass0>::ToString() calli default string(object) pop
ldloc V_15
box valuetype MyStruct263`2<class BaseClass1,class BaseClass0>
ldloc V_15
box valuetype MyStruct263`2<class BaseClass1,class BaseClass0>
ldvirtftn instance string class IBase1`1<class BaseClass1>::Method4()
calli default string(object)
ldstr "MyStruct263::Method4.MI.2019()"
ldstr "class IBase1`1<class BaseClass1> on type valuetype MyStruct263`2<class BaseClass1,class BaseClass0>"
call void [TestFramework]TestFramework::MethodCallTest(string,string,string)
ldloc V_15
box valuetype MyStruct263`2<class BaseClass1,class BaseClass0>
ldloc V_15
box valuetype MyStruct263`2<class BaseClass1,class BaseClass0>
ldvirtftn instance string class IBase1`1<class BaseClass1>::Method5()
calli default string(object)
ldstr "MyStruct263::Method5.2020()"
ldstr "class IBase1`1<class BaseClass1> on type valuetype MyStruct263`2<class BaseClass1,class BaseClass0>"
call void [TestFramework]TestFramework::MethodCallTest(string,string,string)
ldloc V_15
box valuetype MyStruct263`2<class BaseClass1,class BaseClass0>
ldloc V_15
box valuetype MyStruct263`2<class BaseClass1,class BaseClass0>
ldvirtftn instance string class IBase1`1<class BaseClass1>::Method6<object>()
calli default string(object)
ldstr "MyStruct263::Method6.MI.2022<System.Object>()"
ldstr "class IBase1`1<class BaseClass1> on type valuetype MyStruct263`2<class BaseClass1,class BaseClass0>"
call void [TestFramework]TestFramework::MethodCallTest(string,string,string)
ldloc V_15
box valuetype MyStruct263`2<class BaseClass1,class BaseClass0>
ldloc V_15
box valuetype MyStruct263`2<class BaseClass1,class BaseClass0>
ldvirtftn instance string IBase0::Method0()
calli default string(object)
ldstr "MyStruct263::Method0.MI.2024()"
ldstr "IBase0 on type valuetype MyStruct263`2<class BaseClass1,class BaseClass0>"
call void [TestFramework]TestFramework::MethodCallTest(string,string,string)
ldloc V_15
box valuetype MyStruct263`2<class BaseClass1,class BaseClass0>
ldloc V_15
box valuetype MyStruct263`2<class BaseClass1,class BaseClass0>
ldvirtftn instance string IBase0::Method1()
calli default string(object)
ldstr "MyStruct263::Method1.MI.2026()"
ldstr "IBase0 on type valuetype MyStruct263`2<class BaseClass1,class BaseClass0>"
call void [TestFramework]TestFramework::MethodCallTest(string,string,string)
ldloc V_15
box valuetype MyStruct263`2<class BaseClass1,class BaseClass0>
ldloc V_15
box valuetype MyStruct263`2<class BaseClass1,class BaseClass0>
ldvirtftn instance string IBase0::Method2<object>()
calli default string(object)
ldstr "MyStruct263::Method2.2027<System.Object>()"
ldstr "IBase0 on type valuetype MyStruct263`2<class BaseClass1,class BaseClass0>"
call void [TestFramework]TestFramework::MethodCallTest(string,string,string)
ldloc V_15
box valuetype MyStruct263`2<class BaseClass1,class BaseClass0>
ldloc V_15
box valuetype MyStruct263`2<class BaseClass1,class BaseClass0>
ldvirtftn instance string IBase0::Method3<object>()
calli default string(object)
ldstr "MyStruct263::Method3.2028<System.Object>()"
ldstr "IBase0 on type valuetype MyStruct263`2<class BaseClass1,class BaseClass0>"
call void [TestFramework]TestFramework::MethodCallTest(string,string,string)
ldloc V_15
box valuetype MyStruct263`2<class BaseClass1,class BaseClass0>
ldloc V_15
box valuetype MyStruct263`2<class BaseClass1,class BaseClass0>
ldvirtftn instance string class IBase1`1<class BaseClass0>::Method4()
calli default string(object)
ldstr "MyStruct263::Method4.MI.2019()"
ldstr "class IBase1`1<class BaseClass0> on type valuetype MyStruct263`2<class BaseClass1,class BaseClass0>"
call void [TestFramework]TestFramework::MethodCallTest(string,string,string)
ldloc V_15
box valuetype MyStruct263`2<class BaseClass1,class BaseClass0>
ldloc V_15
box valuetype MyStruct263`2<class BaseClass1,class BaseClass0>
ldvirtftn instance string class IBase1`1<class BaseClass0>::Method5()
calli default string(object)
ldstr "MyStruct263::Method5.2020()"
ldstr "class IBase1`1<class BaseClass0> on type valuetype MyStruct263`2<class BaseClass1,class BaseClass0>"
call void [TestFramework]TestFramework::MethodCallTest(string,string,string)
ldloc V_15
box valuetype MyStruct263`2<class BaseClass1,class BaseClass0>
ldloc V_15
box valuetype MyStruct263`2<class BaseClass1,class BaseClass0>
ldvirtftn instance string class IBase1`1<class BaseClass0>::Method6<object>()
calli default string(object)
ldstr "MyStruct263::Method6.MI.2022<System.Object>()"
ldstr "class IBase1`1<class BaseClass0> on type valuetype MyStruct263`2<class BaseClass1,class BaseClass0>"
call void [TestFramework]TestFramework::MethodCallTest(string,string,string)
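    // Repeat the ldvirtftn/calli checks for the MyStruct263`2<class BaseClass1,class BaseClass1> instantiation.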
.locals init (valuetype MyStruct263`2<class BaseClass1,class BaseClass1> V_16)
ldloca V_16
initobj valuetype MyStruct263`2<class BaseClass1,class BaseClass1>
ldloc V_16
box valuetype MyStruct263`2<class BaseClass1,class BaseClass1>
ldloc V_16
box valuetype MyStruct263`2<class BaseClass1,class BaseClass1>
ldvirtftn instance string valuetype MyStruct263`2<class BaseClass1,class BaseClass1>::Method4()
calli default string(object)
ldstr "MyStruct263::Method4.2018()"
ldstr "valuetype MyStruct263`2<class BaseClass1,class BaseClass1> on type valuetype MyStruct263`2<class BaseClass1,class BaseClass1>"
call void [TestFramework]TestFramework::MethodCallTest(string,string,string)
ldloc V_16
box valuetype MyStruct263`2<class BaseClass1,class BaseClass1>
ldloc V_16
box valuetype MyStruct263`2<class BaseClass1,class BaseClass1>
ldvirtftn instance string valuetype MyStruct263`2<class BaseClass1,class BaseClass1>::Method5()
calli default string(object)
ldstr "MyStruct263::Method5.2020()"
ldstr "valuetype MyStruct263`2<class BaseClass1,class BaseClass1> on type valuetype MyStruct263`2<class BaseClass1,class BaseClass1>"
call void [TestFramework]TestFramework::MethodCallTest(string,string,string)
ldloc V_16
box valuetype MyStruct263`2<class BaseClass1,class BaseClass1>
ldloc V_16
box valuetype MyStruct263`2<class BaseClass1,class BaseClass1>
ldvirtftn instance string valuetype MyStruct263`2<class BaseClass1,class BaseClass1>::Method6<object>()
calli default string(object)
ldstr "MyStruct263::Method6.2021<System.Object>()"
ldstr "valuetype MyStruct263`2<class BaseClass1,class BaseClass1> on type valuetype MyStruct263`2<class BaseClass1,class BaseClass1>"
call void [TestFramework]TestFramework::MethodCallTest(string,string,string)
ldloc V_16
box valuetype MyStruct263`2<class BaseClass1,class BaseClass1>
ldloc V_16
box valuetype MyStruct263`2<class BaseClass1,class BaseClass1>
ldvirtftn instance string valuetype MyStruct263`2<class BaseClass1,class BaseClass1>::Method0()
calli default string(object)
ldstr "MyStruct263::Method0.2023()"
ldstr "valuetype MyStruct263`2<class BaseClass1,class BaseClass1> on type valuetype MyStruct263`2<class BaseClass1,class BaseClass1>"
call void [TestFramework]TestFramework::MethodCallTest(string,string,string)
ldloc V_16
box valuetype MyStruct263`2<class BaseClass1,class BaseClass1>
ldloc V_16
box valuetype MyStruct263`2<class BaseClass1,class BaseClass1>
ldvirtftn instance string valuetype MyStruct263`2<class BaseClass1,class BaseClass1>::Method1()
calli default string(object)
ldstr "MyStruct263::Method1.2025()"
ldstr "valuetype MyStruct263`2<class BaseClass1,class BaseClass1> on type valuetype MyStruct263`2<class BaseClass1,class BaseClass1>"
call void [TestFramework]TestFramework::MethodCallTest(string,string,string)
ldloc V_16
box valuetype MyStruct263`2<class BaseClass1,class BaseClass1>
ldloc V_16
box valuetype MyStruct263`2<class BaseClass1,class BaseClass1>
ldvirtftn instance string valuetype MyStruct263`2<class BaseClass1,class BaseClass1>::Method2<object>()
calli default string(object)
ldstr "MyStruct263::Method2.2027<System.Object>()"
ldstr "valuetype MyStruct263`2<class BaseClass1,class BaseClass1> on type valuetype MyStruct263`2<class BaseClass1,class BaseClass1>"
call void [TestFramework]TestFramework::MethodCallTest(string,string,string)
ldloc V_16
box valuetype MyStruct263`2<class BaseClass1,class BaseClass1>
ldloc V_16
box valuetype MyStruct263`2<class BaseClass1,class BaseClass1>
ldvirtftn instance string valuetype MyStruct263`2<class BaseClass1,class BaseClass1>::Method3<object>()
calli default string(object)
ldstr "MyStruct263::Method3.2028<System.Object>()"
ldstr "valuetype MyStruct263`2<class BaseClass1,class BaseClass1> on type valuetype MyStruct263`2<class BaseClass1,class BaseClass1>"
call void [TestFramework]TestFramework::MethodCallTest(string,string,string)
ldloc V_16
box valuetype MyStruct263`2<class BaseClass1,class BaseClass1>
ldloc V_16
box valuetype MyStruct263`2<class BaseClass1,class BaseClass1>
ldvirtftn instance string valuetype MyStruct263`2<class BaseClass1,class BaseClass1>::ClassMethod533()
calli default string(object)
ldstr "MyStruct263::ClassMethod533.2029()"
ldstr "valuetype MyStruct263`2<class BaseClass1,class BaseClass1> on type valuetype MyStruct263`2<class BaseClass1,class BaseClass1>"
call void [TestFramework]TestFramework::MethodCallTest(string,string,string)
ldloc V_16 box valuetype MyStruct263`2<class BaseClass1,class BaseClass1> ldnull
ldloc V_16 box valuetype MyStruct263`2<class BaseClass1,class BaseClass1>
ldvirtftn instance bool valuetype MyStruct263`2<class BaseClass1,class BaseClass1>::Equals(object) calli default bool(object,object) pop
ldloc V_16 box valuetype MyStruct263`2<class BaseClass1,class BaseClass1>
ldloc V_16 box valuetype MyStruct263`2<class BaseClass1,class BaseClass1>
ldvirtftn instance int32 valuetype MyStruct263`2<class BaseClass1,class BaseClass1>::GetHashCode() calli default int32(object) pop
ldloc V_16 box valuetype MyStruct263`2<class BaseClass1,class BaseClass1>
ldloc V_16 box valuetype MyStruct263`2<class BaseClass1,class BaseClass1>
ldvirtftn instance string valuetype MyStruct263`2<class BaseClass1,class BaseClass1>::ToString() calli default string(object) pop
ldloc V_16
box valuetype MyStruct263`2<class BaseClass1,class BaseClass1>
ldloc V_16
box valuetype MyStruct263`2<class BaseClass1,class BaseClass1>
ldvirtftn instance string class IBase1`1<class BaseClass1>::Method4()
calli default string(object)
ldstr "MyStruct263::Method4.MI.2019()"
ldstr "class IBase1`1<class BaseClass1> on type valuetype MyStruct263`2<class BaseClass1,class BaseClass1>"
call void [TestFramework]TestFramework::MethodCallTest(string,string,string)
ldloc V_16
box valuetype MyStruct263`2<class BaseClass1,class BaseClass1>
ldloc V_16
box valuetype MyStruct263`2<class BaseClass1,class BaseClass1>
ldvirtftn instance string class IBase1`1<class BaseClass1>::Method5()
calli default string(object)
ldstr "MyStruct263::Method5.2020()"
ldstr "class IBase1`1<class BaseClass1> on type valuetype MyStruct263`2<class BaseClass1,class BaseClass1>"
call void [TestFramework]TestFramework::MethodCallTest(string,string,string)
ldloc V_16
box valuetype MyStruct263`2<class BaseClass1,class BaseClass1>
ldloc V_16
box valuetype MyStruct263`2<class BaseClass1,class BaseClass1>
ldvirtftn instance string class IBase1`1<class BaseClass1>::Method6<object>()
calli default string(object)
ldstr "MyStruct263::Method6.MI.2022<System.Object>()"
ldstr "class IBase1`1<class BaseClass1> on type valuetype MyStruct263`2<class BaseClass1,class BaseClass1>"
call void [TestFramework]TestFramework::MethodCallTest(string,string,string)
ldloc V_16
box valuetype MyStruct263`2<class BaseClass1,class BaseClass1>
ldloc V_16
box valuetype MyStruct263`2<class BaseClass1,class BaseClass1>
ldvirtftn instance string IBase0::Method0()
calli default string(object)
ldstr "MyStruct263::Method0.MI.2024()"
ldstr "IBase0 on type valuetype MyStruct263`2<class BaseClass1,class BaseClass1>"
call void [TestFramework]TestFramework::MethodCallTest(string,string,string)
ldloc V_16
box valuetype MyStruct263`2<class BaseClass1,class BaseClass1>
ldloc V_16
box valuetype MyStruct263`2<class BaseClass1,class BaseClass1>
ldvirtftn instance string IBase0::Method1()
calli default string(object)
ldstr "MyStruct263::Method1.MI.2026()"
ldstr "IBase0 on type valuetype MyStruct263`2<class BaseClass1,class BaseClass1>"
call void [TestFramework]TestFramework::MethodCallTest(string,string,string)
ldloc V_16
box valuetype MyStruct263`2<class BaseClass1,class BaseClass1>
ldloc V_16
box valuetype MyStruct263`2<class BaseClass1,class BaseClass1>
ldvirtftn instance string IBase0::Method2<object>()
calli default string(object)
ldstr "MyStruct263::Method2.2027<System.Object>()"
ldstr "IBase0 on type valuetype MyStruct263`2<class BaseClass1,class BaseClass1>"
call void [TestFramework]TestFramework::MethodCallTest(string,string,string)
ldloc V_16
box valuetype MyStruct263`2<class BaseClass1,class BaseClass1>
ldloc V_16
box valuetype MyStruct263`2<class BaseClass1,class BaseClass1>
ldvirtftn instance string IBase0::Method3<object>()
calli default string(object)
ldstr "MyStruct263::Method3.2028<System.Object>()"
ldstr "IBase0 on type valuetype MyStruct263`2<class BaseClass1,class BaseClass1>"
call void [TestFramework]TestFramework::MethodCallTest(string,string,string)
ldloc V_16
box valuetype MyStruct263`2<class BaseClass1,class BaseClass1>
ldloc V_16
box valuetype MyStruct263`2<class BaseClass1,class BaseClass1>
ldvirtftn instance string class IBase1`1<class BaseClass0>::Method4()
calli default string(object)
ldstr "MyStruct263::Method4.MI.2019()"
ldstr "class IBase1`1<class BaseClass0> on type valuetype MyStruct263`2<class BaseClass1,class BaseClass1>"
call void [TestFramework]TestFramework::MethodCallTest(string,string,string)
ldloc V_16
box valuetype MyStruct263`2<class BaseClass1,class BaseClass1>
ldloc V_16
box valuetype MyStruct263`2<class BaseClass1,class BaseClass1>
ldvirtftn instance string class IBase1`1<class BaseClass0>::Method5()
calli default string(object)
ldstr "MyStruct263::Method5.2020()"
ldstr "class IBase1`1<class BaseClass0> on type valuetype MyStruct263`2<class BaseClass1,class BaseClass1>"
call void [TestFramework]TestFramework::MethodCallTest(string,string,string)
ldloc V_16
box valuetype MyStruct263`2<class BaseClass1,class BaseClass1>
ldloc V_16
box valuetype MyStruct263`2<class BaseClass1,class BaseClass1>
ldvirtftn instance string class IBase1`1<class BaseClass0>::Method6<object>()
calli default string(object)
ldstr "MyStruct263::Method6.MI.2022<System.Object>()"
ldstr "class IBase1`1<class BaseClass0> on type valuetype MyStruct263`2<class BaseClass1,class BaseClass1>"
call void [TestFramework]TestFramework::MethodCallTest(string,string,string)
ldstr "========================================================================\n\n"
call void [mscorlib]System.Console::WriteLine(string)
ret
}
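  // Entry point: runs the four generated test passes; a return value of 100 signals success.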
.method public hidebysig static int32 Main() cil managed
{
.custom instance void [xunit.core]Xunit.FactAttribute::.ctor() = (
01 00 00 00
)
.entrypoint
.maxstack 10
call void Generated213::MethodCallingTest()
call void Generated213::ConstrainedCallsTest()
call void Generated213::StructConstrainedInterfaceCallsTest()
call void Generated213::CalliTest()
ldc.i4 100
ret
}
}
// Licensed to the .NET Foundation under one or more agreements.
// The .NET Foundation licenses this file to you under the MIT license.
.assembly extern mscorlib { .publickeytoken = (B7 7A 5C 56 19 34 E0 89 ) .ver 4:0:0:0 }
.assembly extern TestFramework { .publickeytoken = ( B0 3F 5F 7F 11 D5 0A 3A ) }
//TYPES IN FORWARDER ASSEMBLIES:
//TEST ASSEMBLY:
.assembly Generated213 { .hash algorithm 0x00008004 }
.assembly extern xunit.core {}
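// Class hierarchy used by the generated tests: BaseClass1 derives from BaseClass0.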
.class public BaseClass0
{
.method public hidebysig specialname rtspecialname instance void .ctor() cil managed {
ldarg.0
call instance void [mscorlib]System.Object::.ctor()
ret
}
}
.class public BaseClass1
extends BaseClass0
{
.method public hidebysig specialname rtspecialname instance void .ctor() cil managed {
ldarg.0
call instance void BaseClass0::.ctor()
ret
}
}
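// Generic value type under test. It implements IBase1`1<class BaseClass1> and IBase0,
// exposing public virtual methods plus explicit .override implementations for the interface slots.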
.class public sequential sealed MyStruct263`2<T0, T1>
extends [mscorlib]System.ValueType
implements class IBase1`1<class BaseClass1>, IBase0
{
.pack 0
.size 1
.method public hidebysig newslot virtual instance string Method4() cil managed noinlining {
ldstr "MyStruct263::Method4.2018()"
ret
}
.method public hidebysig newslot virtual instance string 'IBase1<class BaseClass1>.Method4'() cil managed noinlining {
.override method instance string class IBase1`1<class BaseClass1>::Method4()
ldstr "MyStruct263::Method4.MI.2019()"
ret
}
.method public hidebysig newslot virtual instance string Method5() cil managed noinlining {
ldstr "MyStruct263::Method5.2020()"
ret
}
.method public hidebysig virtual instance string Method6<M0>() cil managed noinlining {
ldstr "MyStruct263::Method6.2021<"
ldtoken !!M0
call class [mscorlib]System.Type [mscorlib]System.Type::GetTypeFromHandle(valuetype [mscorlib]System.RuntimeTypeHandle)
call string [mscorlib]System.String::Concat(object,object)
ldstr ">()"
call string [mscorlib]System.String::Concat(object,object)
ret
}
.method public hidebysig newslot virtual instance string 'IBase1<class BaseClass1>.Method6'<M0>() cil managed noinlining {
.override method instance string class IBase1`1<class BaseClass1>::Method6<[1]>()
ldstr "MyStruct263::Method6.MI.2022<"
ldtoken !!M0
call class [mscorlib]System.Type [mscorlib]System.Type::GetTypeFromHandle(valuetype [mscorlib]System.RuntimeTypeHandle)
call string [mscorlib]System.String::Concat(object,object)
ldstr ">()"
call string [mscorlib]System.String::Concat(object,object)
ret
}
.method public hidebysig newslot virtual instance string Method0() cil managed noinlining {
ldstr "MyStruct263::Method0.2023()"
ret
}
.method public hidebysig newslot virtual instance string 'IBase0.Method0'() cil managed noinlining {
.override method instance string IBase0::Method0()
ldstr "MyStruct263::Method0.MI.2024()"
ret
}
.method public hidebysig newslot virtual instance string Method1() cil managed noinlining {
ldstr "MyStruct263::Method1.2025()"
ret
}
.method public hidebysig newslot virtual instance string 'IBase0.Method1'() cil managed noinlining {
.override method instance string IBase0::Method1()
ldstr "MyStruct263::Method1.MI.2026()"
ret
}
.method public hidebysig virtual instance string Method2<M0>() cil managed noinlining {
ldstr "MyStruct263::Method2.2027<"
ldtoken !!M0
call class [mscorlib]System.Type [mscorlib]System.Type::GetTypeFromHandle(valuetype [mscorlib]System.RuntimeTypeHandle)
call string [mscorlib]System.String::Concat(object,object)
ldstr ">()"
call string [mscorlib]System.String::Concat(object,object)
ret
}
.method public hidebysig virtual instance string Method3<M0>() cil managed noinlining {
ldstr "MyStruct263::Method3.2028<"
ldtoken !!M0
call class [mscorlib]System.Type [mscorlib]System.Type::GetTypeFromHandle(valuetype [mscorlib]System.RuntimeTypeHandle)
call string [mscorlib]System.String::Concat(object,object)
ldstr ">()"
call string [mscorlib]System.String::Concat(object,object)
ret
}
.method public hidebysig newslot instance string ClassMethod533() cil managed noinlining {
ldstr "MyStruct263::ClassMethod533.2029()"
ret
}
.method public hidebysig virtual instance bool Equals(object obj) cil managed { ldc.i4.0 ret }
.method public hidebysig virtual instance int32 GetHashCode() cil managed { ldc.i4.0 ret }
.method public hidebysig virtual instance string ToString() cil managed { ldstr "" ret }
}
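// Interface declarations: IBase1`1 is covariant in T0; IBase0 is non-generic.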
.class interface public abstract IBase1`1<+T0>
{
.method public hidebysig newslot abstract virtual instance string Method4() cil managed { }
.method public hidebysig newslot abstract virtual instance string Method5() cil managed { }
.method public hidebysig newslot abstract virtual instance string Method6<M0>() cil managed { }
}
.class interface public abstract IBase0
{
.method public hidebysig newslot abstract virtual instance string Method0() cil managed { }
.method public hidebysig newslot abstract virtual instance string Method1() cil managed { }
.method public hidebysig newslot abstract virtual instance string Method2<M0>() cil managed { }
.method public hidebysig newslot abstract virtual instance string Method3<M0>() cil managed { }
}
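// Test driver class. The M.* helpers below invoke the interface methods through
// constrained. calls on generic type parameters and compare the results via TestFramework.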
.class public auto ansi beforefieldinit Generated213 {
.method static void M.BaseClass0<(BaseClass0)W>(!!W inst, string exp) cil managed {
.maxstack 5
.locals init (string[] actualResults)
ldc.i4.s 0
newarr string
stloc.s actualResults
ldarg.1
ldstr "M.BaseClass0<(BaseClass0)W>(!!W inst, string exp)"
ldc.i4.s 0
ldloc.s actualResults
call void [TestFramework]TestFramework::MethodCallTest(string,string,int32,string[])
ret
}
.method static void M.BaseClass1<(BaseClass1)W>(!!W inst, string exp) cil managed {
.maxstack 5
.locals init (string[] actualResults)
ldc.i4.s 0
newarr string
stloc.s actualResults
ldarg.1
ldstr "M.BaseClass1<(BaseClass1)W>(!!W inst, string exp)"
ldc.i4.s 0
ldloc.s actualResults
call void [TestFramework]TestFramework::MethodCallTest(string,string,int32,string[])
ret
}
.method static void M.IBase1.T<T0,(class IBase1`1<!!T0>)W>(!!W 'inst', string exp) cil managed {
.maxstack 8
.locals init (string[] actualResults)
ldc.i4.s 3
newarr string
stloc.s actualResults
ldarg.1
ldstr "M.IBase1.T<T0,(class IBase1`1<!!T0>)W>(!!W 'inst', string exp)"
ldc.i4.s 3
ldloc.s actualResults
ldc.i4.s 0
ldarga.s 0
constrained. !!W
callvirt instance string class IBase1`1<!!T0>::Method4()
stelem.ref
ldloc.s actualResults
ldc.i4.s 1
ldarga.s 0
constrained. !!W
callvirt instance string class IBase1`1<!!T0>::Method5()
stelem.ref
ldloc.s actualResults
ldc.i4.s 2
ldarga.s 0
constrained. !!W
callvirt instance string class IBase1`1<!!T0>::Method6<object>()
stelem.ref
ldloc.s actualResults
call void [TestFramework]TestFramework::MethodCallTest(string,string,int32,string[])
ret
}
.method static void M.IBase1.A<(class IBase1`1<class BaseClass0>)W>(!!W 'inst', string exp) cil managed {
.maxstack 8
.locals init (string[] actualResults)
ldc.i4.s 3
newarr string
stloc.s actualResults
ldarg.1
ldstr "M.IBase1.A<(class IBase1`1<class BaseClass0>)W>(!!W 'inst', string exp)"
ldc.i4.s 3
ldloc.s actualResults
ldc.i4.s 0
ldarga.s 0
constrained. !!W
callvirt instance string class IBase1`1<class BaseClass0>::Method4()
stelem.ref
ldloc.s actualResults
ldc.i4.s 1
ldarga.s 0
constrained. !!W
callvirt instance string class IBase1`1<class BaseClass0>::Method5()
stelem.ref
ldloc.s actualResults
ldc.i4.s 2
ldarga.s 0
constrained. !!W
callvirt instance string class IBase1`1<class BaseClass0>::Method6<object>()
stelem.ref
ldloc.s actualResults
call void [TestFramework]TestFramework::MethodCallTest(string,string,int32,string[])
ret
}
.method static void M.IBase1.B<(class IBase1`1<class BaseClass1>)W>(!!W 'inst', string exp) cil managed {
.maxstack 8
.locals init (string[] actualResults)
ldc.i4.s 3
newarr string
stloc.s actualResults
ldarg.1
ldstr "M.IBase1.B<(class IBase1`1<class BaseClass1>)W>(!!W 'inst', string exp)"
ldc.i4.s 3
ldloc.s actualResults
ldc.i4.s 0
ldarga.s 0
constrained. !!W
callvirt instance string class IBase1`1<class BaseClass1>::Method4()
stelem.ref
ldloc.s actualResults
ldc.i4.s 1
ldarga.s 0
constrained. !!W
callvirt instance string class IBase1`1<class BaseClass1>::Method5()
stelem.ref
ldloc.s actualResults
ldc.i4.s 2
ldarga.s 0
constrained. !!W
callvirt instance string class IBase1`1<class BaseClass1>::Method6<object>()
stelem.ref
ldloc.s actualResults
call void [TestFramework]TestFramework::MethodCallTest(string,string,int32,string[])
ret
}
.method static void M.IBase0<(IBase0)W>(!!W inst, string exp) cil managed {
.maxstack 9
.locals init (string[] actualResults)
ldc.i4.s 4
newarr string
stloc.s actualResults
ldarg.1
ldstr "M.IBase0<(IBase0)W>(!!W inst, string exp)"
ldc.i4.s 4
ldloc.s actualResults
ldc.i4.s 0
ldarga.s 0
constrained. !!W
callvirt instance string IBase0::Method0()
stelem.ref
ldloc.s actualResults
ldc.i4.s 1
ldarga.s 0
constrained. !!W
callvirt instance string IBase0::Method1()
stelem.ref
ldloc.s actualResults
ldc.i4.s 2
ldarga.s 0
constrained. !!W
callvirt instance string IBase0::Method2<object>()
stelem.ref
ldloc.s actualResults
ldc.i4.s 3
ldarga.s 0
constrained. !!W
callvirt instance string IBase0::Method3<object>()
stelem.ref
ldloc.s actualResults
call void [TestFramework]TestFramework::MethodCallTest(string,string,int32,string[])
ret
}
.method static void M.MyStruct263.T.T<T0,T1,(valuetype MyStruct263`2<!!T0,!!T1>)W>(!!W 'inst', string exp) cil managed {
.maxstack 12
.locals init (string[] actualResults)
ldc.i4.s 7
newarr string
stloc.s actualResults
ldarg.1
ldstr "M.MyStruct263.T.T<T0,T1,(valuetype MyStruct263`2<!!T0,!!T1>)W>(!!W 'inst', string exp)"
ldc.i4.s 7
ldloc.s actualResults
ldc.i4.s 0
ldarga.s 0
constrained. valuetype MyStruct263`2<!!T0,!!T1>
callvirt instance string class IBase1`1<class BaseClass1>::Method4()
stelem.ref
ldloc.s actualResults
ldc.i4.s 1
ldarga.s 0
constrained. valuetype MyStruct263`2<!!T0,!!T1>
callvirt instance string class IBase1`1<class BaseClass1>::Method5()
stelem.ref
ldloc.s actualResults
ldc.i4.s 2
ldarga.s 0
constrained. valuetype MyStruct263`2<!!T0,!!T1>
callvirt instance string class IBase1`1<class BaseClass1>::Method6<object>()
stelem.ref
ldloc.s actualResults
ldc.i4.s 3
ldarga.s 0
constrained. valuetype MyStruct263`2<!!T0,!!T1>
callvirt instance string IBase0::Method0()
stelem.ref
ldloc.s actualResults
ldc.i4.s 4
ldarga.s 0
constrained. valuetype MyStruct263`2<!!T0,!!T1>
callvirt instance string IBase0::Method1()
stelem.ref
ldloc.s actualResults
ldc.i4.s 5
ldarga.s 0
constrained. valuetype MyStruct263`2<!!T0,!!T1>
callvirt instance string IBase0::Method2<object>()
stelem.ref
ldloc.s actualResults
ldc.i4.s 6
ldarga.s 0
constrained. valuetype MyStruct263`2<!!T0,!!T1>
callvirt instance string IBase0::Method3<object>()
stelem.ref
ldloc.s actualResults
call void [TestFramework]TestFramework::MethodCallTest(string,string,int32,string[])
ret
}
.method static void M.MyStruct263.A.T<T1,(valuetype MyStruct263`2<class BaseClass0,!!T1>)W>(!!W 'inst', string exp) cil managed {
.maxstack 12
.locals init (string[] actualResults)
ldc.i4.s 7
newarr string
stloc.s actualResults
ldarg.1
ldstr "M.MyStruct263.A.T<T1,(valuetype MyStruct263`2<class BaseClass0,!!T1>)W>(!!W 'inst', string exp)"
ldc.i4.s 7
ldloc.s actualResults
ldc.i4.s 0
ldarga.s 0
constrained. valuetype MyStruct263`2<class BaseClass0,!!T1>
callvirt instance string class IBase1`1<class BaseClass1>::Method4()
stelem.ref
ldloc.s actualResults
ldc.i4.s 1
ldarga.s 0
constrained. valuetype MyStruct263`2<class BaseClass0,!!T1>
callvirt instance string class IBase1`1<class BaseClass1>::Method5()
stelem.ref
ldloc.s actualResults
ldc.i4.s 2
ldarga.s 0
constrained. valuetype MyStruct263`2<class BaseClass0,!!T1>
callvirt instance string class IBase1`1<class BaseClass1>::Method6<object>()
stelem.ref
ldloc.s actualResults
ldc.i4.s 3
ldarga.s 0
constrained. valuetype MyStruct263`2<class BaseClass0,!!T1>
callvirt instance string IBase0::Method0()
stelem.ref
ldloc.s actualResults
ldc.i4.s 4
ldarga.s 0
constrained. valuetype MyStruct263`2<class BaseClass0,!!T1>
callvirt instance string IBase0::Method1()
stelem.ref
ldloc.s actualResults
ldc.i4.s 5
ldarga.s 0
constrained. valuetype MyStruct263`2<class BaseClass0,!!T1>
callvirt instance string IBase0::Method2<object>()
stelem.ref
ldloc.s actualResults
ldc.i4.s 6
ldarga.s 0
constrained. valuetype MyStruct263`2<class BaseClass0,!!T1>
callvirt instance string IBase0::Method3<object>()
stelem.ref
ldloc.s actualResults
call void [TestFramework]TestFramework::MethodCallTest(string,string,int32,string[])
ret
}
.method static void M.MyStruct263.A.A<(valuetype MyStruct263`2<class BaseClass0,class BaseClass0>)W>(!!W 'inst', string exp) cil managed {
.maxstack 12
.locals init (string[] actualResults)
ldc.i4.s 7
newarr string
stloc.s actualResults
ldarg.1
ldstr "M.MyStruct263.A.A<(valuetype MyStruct263`2<class BaseClass0,class BaseClass0>)W>(!!W 'inst', string exp)"
ldc.i4.s 7
ldloc.s actualResults
ldc.i4.s 0
ldarga.s 0
constrained. valuetype MyStruct263`2<class BaseClass0,class BaseClass0>
callvirt instance string class IBase1`1<class BaseClass1>::Method4()
stelem.ref
ldloc.s actualResults
ldc.i4.s 1
ldarga.s 0
constrained. valuetype MyStruct263`2<class BaseClass0,class BaseClass0>
callvirt instance string class IBase1`1<class BaseClass1>::Method5()
stelem.ref
ldloc.s actualResults
ldc.i4.s 2
ldarga.s 0
constrained. valuetype MyStruct263`2<class BaseClass0,class BaseClass0>
callvirt instance string class IBase1`1<class BaseClass1>::Method6<object>()
stelem.ref
ldloc.s actualResults
ldc.i4.s 3
ldarga.s 0
constrained. valuetype MyStruct263`2<class BaseClass0,class BaseClass0>
callvirt instance string IBase0::Method0()
stelem.ref
ldloc.s actualResults
ldc.i4.s 4
ldarga.s 0
constrained. valuetype MyStruct263`2<class BaseClass0,class BaseClass0>
callvirt instance string IBase0::Method1()
stelem.ref
ldloc.s actualResults
ldc.i4.s 5
ldarga.s 0
constrained. valuetype MyStruct263`2<class BaseClass0,class BaseClass0>
callvirt instance string IBase0::Method2<object>()
stelem.ref
ldloc.s actualResults
ldc.i4.s 6
ldarga.s 0
constrained. valuetype MyStruct263`2<class BaseClass0,class BaseClass0>
callvirt instance string IBase0::Method3<object>()
stelem.ref
ldloc.s actualResults
call void [TestFramework]TestFramework::MethodCallTest(string,string,int32,string[])
ret
}
.method static void M.MyStruct263.A.B<(valuetype MyStruct263`2<class BaseClass0,class BaseClass1>)W>(!!W 'inst', string exp) cil managed {
.maxstack 12
.locals init (string[] actualResults)
ldc.i4.s 7
newarr string
stloc.s actualResults
ldarg.1
ldstr "M.MyStruct263.A.B<(valuetype MyStruct263`2<class BaseClass0,class BaseClass1>)W>(!!W 'inst', string exp)"
ldc.i4.s 7
ldloc.s actualResults
ldc.i4.s 0
ldarga.s 0
constrained. valuetype MyStruct263`2<class BaseClass0,class BaseClass1>
callvirt instance string class IBase1`1<class BaseClass1>::Method4()
stelem.ref
ldloc.s actualResults
ldc.i4.s 1
ldarga.s 0
constrained. valuetype MyStruct263`2<class BaseClass0,class BaseClass1>
callvirt instance string class IBase1`1<class BaseClass1>::Method5()
stelem.ref
ldloc.s actualResults
ldc.i4.s 2
ldarga.s 0
constrained. valuetype MyStruct263`2<class BaseClass0,class BaseClass1>
callvirt instance string class IBase1`1<class BaseClass1>::Method6<object>()
stelem.ref
ldloc.s actualResults
ldc.i4.s 3
ldarga.s 0
constrained. valuetype MyStruct263`2<class BaseClass0,class BaseClass1>
callvirt instance string IBase0::Method0()
stelem.ref
ldloc.s actualResults
ldc.i4.s 4
ldarga.s 0
constrained. valuetype MyStruct263`2<class BaseClass0,class BaseClass1>
callvirt instance string IBase0::Method1()
stelem.ref
ldloc.s actualResults
ldc.i4.s 5
ldarga.s 0
constrained. valuetype MyStruct263`2<class BaseClass0,class BaseClass1>
callvirt instance string IBase0::Method2<object>()
stelem.ref
ldloc.s actualResults
ldc.i4.s 6
ldarga.s 0
constrained. valuetype MyStruct263`2<class BaseClass0,class BaseClass1>
callvirt instance string IBase0::Method3<object>()
stelem.ref
ldloc.s actualResults
call void [TestFramework]TestFramework::MethodCallTest(string,string,int32,string[])
ret
}
.method static void M.MyStruct263.B.T<T1,(valuetype MyStruct263`2<class BaseClass1,!!T1>)W>(!!W 'inst', string exp) cil managed {
.maxstack 12
.locals init (string[] actualResults)
ldc.i4.s 7
newarr string
stloc.s actualResults
ldarg.1
ldstr "M.MyStruct263.B.T<T1,(valuetype MyStruct263`2<class BaseClass1,!!T1>)W>(!!W 'inst', string exp)"
ldc.i4.s 7
ldloc.s actualResults
ldc.i4.s 0
ldarga.s 0
constrained. valuetype MyStruct263`2<class BaseClass1,!!T1>
callvirt instance string class IBase1`1<class BaseClass1>::Method4()
stelem.ref
ldloc.s actualResults
ldc.i4.s 1
ldarga.s 0
constrained. valuetype MyStruct263`2<class BaseClass1,!!T1>
callvirt instance string class IBase1`1<class BaseClass1>::Method5()
stelem.ref
ldloc.s actualResults
ldc.i4.s 2
ldarga.s 0
constrained. valuetype MyStruct263`2<class BaseClass1,!!T1>
callvirt instance string class IBase1`1<class BaseClass1>::Method6<object>()
stelem.ref
ldloc.s actualResults
ldc.i4.s 3
ldarga.s 0
constrained. valuetype MyStruct263`2<class BaseClass1,!!T1>
callvirt instance string IBase0::Method0()
stelem.ref
ldloc.s actualResults
ldc.i4.s 4
ldarga.s 0
constrained. valuetype MyStruct263`2<class BaseClass1,!!T1>
callvirt instance string IBase0::Method1()
stelem.ref
ldloc.s actualResults
ldc.i4.s 5
ldarga.s 0
constrained. valuetype MyStruct263`2<class BaseClass1,!!T1>
callvirt instance string IBase0::Method2<object>()
stelem.ref
ldloc.s actualResults
ldc.i4.s 6
ldarga.s 0
constrained. valuetype MyStruct263`2<class BaseClass1,!!T1>
callvirt instance string IBase0::Method3<object>()
stelem.ref
ldloc.s actualResults
call void [TestFramework]TestFramework::MethodCallTest(string,string,int32,string[])
ret
}
.method static void M.MyStruct263.B.A<(valuetype MyStruct263`2<class BaseClass1,class BaseClass0>)W>(!!W 'inst', string exp) cil managed {
.maxstack 12
.locals init (string[] actualResults)
ldc.i4.s 7
newarr string
stloc.s actualResults
ldarg.1
ldstr "M.MyStruct263.B.A<(valuetype MyStruct263`2<class BaseClass1,class BaseClass0>)W>(!!W 'inst', string exp)"
ldc.i4.s 7
ldloc.s actualResults
ldc.i4.s 0
ldarga.s 0
constrained. valuetype MyStruct263`2<class BaseClass1,class BaseClass0>
callvirt instance string class IBase1`1<class BaseClass1>::Method4()
stelem.ref
ldloc.s actualResults
ldc.i4.s 1
ldarga.s 0
constrained. valuetype MyStruct263`2<class BaseClass1,class BaseClass0>
callvirt instance string class IBase1`1<class BaseClass1>::Method5()
stelem.ref
ldloc.s actualResults
ldc.i4.s 2
ldarga.s 0
constrained. valuetype MyStruct263`2<class BaseClass1,class BaseClass0>
callvirt instance string class IBase1`1<class BaseClass1>::Method6<object>()
stelem.ref
ldloc.s actualResults
ldc.i4.s 3
ldarga.s 0
constrained. valuetype MyStruct263`2<class BaseClass1,class BaseClass0>
callvirt instance string IBase0::Method0()
stelem.ref
ldloc.s actualResults
ldc.i4.s 4
ldarga.s 0
constrained. valuetype MyStruct263`2<class BaseClass1,class BaseClass0>
callvirt instance string IBase0::Method1()
stelem.ref
ldloc.s actualResults
ldc.i4.s 5
ldarga.s 0
constrained. valuetype MyStruct263`2<class BaseClass1,class BaseClass0>
callvirt instance string IBase0::Method2<object>()
stelem.ref
ldloc.s actualResults
ldc.i4.s 6
ldarga.s 0
constrained. valuetype MyStruct263`2<class BaseClass1,class BaseClass0>
callvirt instance string IBase0::Method3<object>()
stelem.ref
ldloc.s actualResults
call void [TestFramework]TestFramework::MethodCallTest(string,string,int32,string[])
ret
}
.method static void M.MyStruct263.B.B<(valuetype MyStruct263`2<class BaseClass1,class BaseClass1>)W>(!!W 'inst', string exp) cil managed {
.maxstack 12
.locals init (string[] actualResults)
ldc.i4.s 7
newarr string
stloc.s actualResults
ldarg.1
ldstr "M.MyStruct263.B.B<(valuetype MyStruct263`2<class BaseClass1,class BaseClass1>)W>(!!W 'inst', string exp)"
ldc.i4.s 7
ldloc.s actualResults
ldc.i4.s 0
ldarga.s 0
constrained. valuetype MyStruct263`2<class BaseClass1,class BaseClass1>
callvirt instance string class IBase1`1<class BaseClass1>::Method4()
stelem.ref
ldloc.s actualResults
ldc.i4.s 1
ldarga.s 0
constrained. valuetype MyStruct263`2<class BaseClass1,class BaseClass1>
callvirt instance string class IBase1`1<class BaseClass1>::Method5()
stelem.ref
ldloc.s actualResults
ldc.i4.s 2
ldarga.s 0
constrained. valuetype MyStruct263`2<class BaseClass1,class BaseClass1>
callvirt instance string class IBase1`1<class BaseClass1>::Method6<object>()
stelem.ref
ldloc.s actualResults
ldc.i4.s 3
ldarga.s 0
constrained. valuetype MyStruct263`2<class BaseClass1,class BaseClass1>
callvirt instance string IBase0::Method0()
stelem.ref
ldloc.s actualResults
ldc.i4.s 4
ldarga.s 0
constrained. valuetype MyStruct263`2<class BaseClass1,class BaseClass1>
callvirt instance string IBase0::Method1()
stelem.ref
ldloc.s actualResults
ldc.i4.s 5
ldarga.s 0
constrained. valuetype MyStruct263`2<class BaseClass1,class BaseClass1>
callvirt instance string IBase0::Method2<object>()
stelem.ref
ldloc.s actualResults
ldc.i4.s 6
ldarga.s 0
constrained. valuetype MyStruct263`2<class BaseClass1,class BaseClass1>
callvirt instance string IBase0::Method3<object>()
stelem.ref
ldloc.s actualResults
call void [TestFramework]TestFramework::MethodCallTest(string,string,int32,string[])
ret
}
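  // MethodCallingTest: calls each method directly on struct locals and via callvirt
  // on boxed copies, verifying that the expected override is selected.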
.method public hidebysig static void MethodCallingTest() cil managed
{
.maxstack 10
.locals init (object V_0)
ldstr "========================== Method Calling Test =========================="
call void [mscorlib]System.Console::WriteLine(string)
.locals init (valuetype MyStruct263`2<class BaseClass0,class BaseClass0> V_1)
ldloca V_1
initobj valuetype MyStruct263`2<class BaseClass0,class BaseClass0>
ldloca V_1
dup
call instance string valuetype MyStruct263`2<class BaseClass0,class BaseClass0>::Method4()
ldstr "MyStruct263::Method4.2018()"
ldstr "valuetype MyStruct263`2<class BaseClass0,class BaseClass0> on type MyStruct263"
call void [TestFramework]TestFramework::MethodCallTest(string,string,string)
dup
call instance string valuetype MyStruct263`2<class BaseClass0,class BaseClass0>::Method5()
ldstr "MyStruct263::Method5.2020()"
ldstr "valuetype MyStruct263`2<class BaseClass0,class BaseClass0> on type MyStruct263"
call void [TestFramework]TestFramework::MethodCallTest(string,string,string)
dup
call instance string valuetype MyStruct263`2<class BaseClass0,class BaseClass0>::Method6<object>()
ldstr "MyStruct263::Method6.2021<System.Object>()"
ldstr "valuetype MyStruct263`2<class BaseClass0,class BaseClass0> on type MyStruct263"
call void [TestFramework]TestFramework::MethodCallTest(string,string,string)
dup
call instance string valuetype MyStruct263`2<class BaseClass0,class BaseClass0>::Method0()
ldstr "MyStruct263::Method0.2023()"
ldstr "valuetype MyStruct263`2<class BaseClass0,class BaseClass0> on type MyStruct263"
call void [TestFramework]TestFramework::MethodCallTest(string,string,string)
dup
call instance string valuetype MyStruct263`2<class BaseClass0,class BaseClass0>::Method1()
ldstr "MyStruct263::Method1.2025()"
ldstr "valuetype MyStruct263`2<class BaseClass0,class BaseClass0> on type MyStruct263"
call void [TestFramework]TestFramework::MethodCallTest(string,string,string)
dup
call instance string valuetype MyStruct263`2<class BaseClass0,class BaseClass0>::Method2<object>()
ldstr "MyStruct263::Method2.2027<System.Object>()"
ldstr "valuetype MyStruct263`2<class BaseClass0,class BaseClass0> on type MyStruct263"
call void [TestFramework]TestFramework::MethodCallTest(string,string,string)
dup
call instance string valuetype MyStruct263`2<class BaseClass0,class BaseClass0>::Method3<object>()
ldstr "MyStruct263::Method3.2028<System.Object>()"
ldstr "valuetype MyStruct263`2<class BaseClass0,class BaseClass0> on type MyStruct263"
call void [TestFramework]TestFramework::MethodCallTest(string,string,string)
dup
call instance string valuetype MyStruct263`2<class BaseClass0,class BaseClass0>::ClassMethod533()
ldstr "MyStruct263::ClassMethod533.2029()"
ldstr "valuetype MyStruct263`2<class BaseClass0,class BaseClass0> on type MyStruct263"
call void [TestFramework]TestFramework::MethodCallTest(string,string,string)
dup ldnull call instance bool valuetype MyStruct263`2<class BaseClass0,class BaseClass0>::Equals(object) pop
dup call instance int32 valuetype MyStruct263`2<class BaseClass0,class BaseClass0>::GetHashCode() pop
dup call instance string valuetype MyStruct263`2<class BaseClass0,class BaseClass0>::ToString() pop
pop
ldloc V_1
box valuetype MyStruct263`2<class BaseClass0,class BaseClass0>
dup
callvirt instance string class IBase1`1<class BaseClass1>::Method4()
ldstr "MyStruct263::Method4.MI.2019()"
ldstr "class IBase1`1<class BaseClass1> on type valuetype MyStruct263`2<class BaseClass0,class BaseClass0>"
call void [TestFramework]TestFramework::MethodCallTest(string,string,string)
dup
callvirt instance string class IBase1`1<class BaseClass1>::Method5()
ldstr "MyStruct263::Method5.2020()"
ldstr "class IBase1`1<class BaseClass1> on type valuetype MyStruct263`2<class BaseClass0,class BaseClass0>"
call void [TestFramework]TestFramework::MethodCallTest(string,string,string)
dup
callvirt instance string class IBase1`1<class BaseClass1>::Method6<object>()
ldstr "MyStruct263::Method6.MI.2022<System.Object>()"
ldstr "class IBase1`1<class BaseClass1> on type valuetype MyStruct263`2<class BaseClass0,class BaseClass0>"
call void [TestFramework]TestFramework::MethodCallTest(string,string,string)
pop
ldloc V_1
box valuetype MyStruct263`2<class BaseClass0,class BaseClass0>
dup
callvirt instance string IBase0::Method0()
ldstr "MyStruct263::Method0.MI.2024()"
ldstr "IBase0 on type valuetype MyStruct263`2<class BaseClass0,class BaseClass0>"
call void [TestFramework]TestFramework::MethodCallTest(string,string,string)
dup
callvirt instance string IBase0::Method1()
ldstr "MyStruct263::Method1.MI.2026()"
ldstr "IBase0 on type valuetype MyStruct263`2<class BaseClass0,class BaseClass0>"
call void [TestFramework]TestFramework::MethodCallTest(string,string,string)
dup
callvirt instance string IBase0::Method2<object>()
ldstr "MyStruct263::Method2.2027<System.Object>()"
ldstr "IBase0 on type valuetype MyStruct263`2<class BaseClass0,class BaseClass0>"
call void [TestFramework]TestFramework::MethodCallTest(string,string,string)
dup
callvirt instance string IBase0::Method3<object>()
ldstr "MyStruct263::Method3.2028<System.Object>()"
ldstr "IBase0 on type valuetype MyStruct263`2<class BaseClass0,class BaseClass0>"
call void [TestFramework]TestFramework::MethodCallTest(string,string,string)
pop
ldloc V_1
box valuetype MyStruct263`2<class BaseClass0,class BaseClass0>
dup
callvirt instance string class IBase1`1<class BaseClass0>::Method4()
ldstr "MyStruct263::Method4.MI.2019()"
ldstr "class IBase1`1<class BaseClass0> on type valuetype MyStruct263`2<class BaseClass0,class BaseClass0>"
call void [TestFramework]TestFramework::MethodCallTest(string,string,string)
dup
callvirt instance string class IBase1`1<class BaseClass0>::Method5()
ldstr "MyStruct263::Method5.2020()"
ldstr "class IBase1`1<class BaseClass0> on type valuetype MyStruct263`2<class BaseClass0,class BaseClass0>"
call void [TestFramework]TestFramework::MethodCallTest(string,string,string)
dup
callvirt instance string class IBase1`1<class BaseClass0>::Method6<object>()
ldstr "MyStruct263::Method6.MI.2022<System.Object>()"
ldstr "class IBase1`1<class BaseClass0> on type valuetype MyStruct263`2<class BaseClass0,class BaseClass0>"
call void [TestFramework]TestFramework::MethodCallTest(string,string,string)
pop
.locals init (valuetype MyStruct263`2<class BaseClass0,class BaseClass1> V_2)
ldloca V_2
initobj valuetype MyStruct263`2<class BaseClass0,class BaseClass1>
ldloca V_2
dup
call instance string valuetype MyStruct263`2<class BaseClass0,class BaseClass1>::Method4()
ldstr "MyStruct263::Method4.2018()"
ldstr "valuetype MyStruct263`2<class BaseClass0,class BaseClass1> on type MyStruct263"
call void [TestFramework]TestFramework::MethodCallTest(string,string,string)
dup
call instance string valuetype MyStruct263`2<class BaseClass0,class BaseClass1>::Method5()
ldstr "MyStruct263::Method5.2020()"
ldstr "valuetype MyStruct263`2<class BaseClass0,class BaseClass1> on type MyStruct263"
call void [TestFramework]TestFramework::MethodCallTest(string,string,string)
dup
call instance string valuetype MyStruct263`2<class BaseClass0,class BaseClass1>::Method6<object>()
ldstr "MyStruct263::Method6.2021<System.Object>()"
ldstr "valuetype MyStruct263`2<class BaseClass0,class BaseClass1> on type MyStruct263"
call void [TestFramework]TestFramework::MethodCallTest(string,string,string)
dup
call instance string valuetype MyStruct263`2<class BaseClass0,class BaseClass1>::Method0()
ldstr "MyStruct263::Method0.2023()"
ldstr "valuetype MyStruct263`2<class BaseClass0,class BaseClass1> on type MyStruct263"
call void [TestFramework]TestFramework::MethodCallTest(string,string,string)
dup
call instance string valuetype MyStruct263`2<class BaseClass0,class BaseClass1>::Method1()
ldstr "MyStruct263::Method1.2025()"
ldstr "valuetype MyStruct263`2<class BaseClass0,class BaseClass1> on type MyStruct263"
call void [TestFramework]TestFramework::MethodCallTest(string,string,string)
dup
call instance string valuetype MyStruct263`2<class BaseClass0,class BaseClass1>::Method2<object>()
ldstr "MyStruct263::Method2.2027<System.Object>()"
ldstr "valuetype MyStruct263`2<class BaseClass0,class BaseClass1> on type MyStruct263"
call void [TestFramework]TestFramework::MethodCallTest(string,string,string)
dup
call instance string valuetype MyStruct263`2<class BaseClass0,class BaseClass1>::Method3<object>()
ldstr "MyStruct263::Method3.2028<System.Object>()"
ldstr "valuetype MyStruct263`2<class BaseClass0,class BaseClass1> on type MyStruct263"
call void [TestFramework]TestFramework::MethodCallTest(string,string,string)
dup
call instance string valuetype MyStruct263`2<class BaseClass0,class BaseClass1>::ClassMethod533()
ldstr "MyStruct263::ClassMethod533.2029()"
ldstr "valuetype MyStruct263`2<class BaseClass0,class BaseClass1> on type MyStruct263"
call void [TestFramework]TestFramework::MethodCallTest(string,string,string)
dup ldnull call instance bool valuetype MyStruct263`2<class BaseClass0,class BaseClass1>::Equals(object) pop
dup call instance int32 valuetype MyStruct263`2<class BaseClass0,class BaseClass1>::GetHashCode() pop
dup call instance string valuetype MyStruct263`2<class BaseClass0,class BaseClass1>::ToString() pop
pop
ldloc V_2
box valuetype MyStruct263`2<class BaseClass0,class BaseClass1>
dup
callvirt instance string class IBase1`1<class BaseClass1>::Method4()
ldstr "MyStruct263::Method4.MI.2019()"
ldstr "class IBase1`1<class BaseClass1> on type valuetype MyStruct263`2<class BaseClass0,class BaseClass1>"
call void [TestFramework]TestFramework::MethodCallTest(string,string,string)
dup
callvirt instance string class IBase1`1<class BaseClass1>::Method5()
ldstr "MyStruct263::Method5.2020()"
ldstr "class IBase1`1<class BaseClass1> on type valuetype MyStruct263`2<class BaseClass0,class BaseClass1>"
call void [TestFramework]TestFramework::MethodCallTest(string,string,string)
dup
callvirt instance string class IBase1`1<class BaseClass1>::Method6<object>()
ldstr "MyStruct263::Method6.MI.2022<System.Object>()"
ldstr "class IBase1`1<class BaseClass1> on type valuetype MyStruct263`2<class BaseClass0,class BaseClass1>"
call void [TestFramework]TestFramework::MethodCallTest(string,string,string)
pop
ldloc V_2
box valuetype MyStruct263`2<class BaseClass0,class BaseClass1>
dup
callvirt instance string IBase0::Method0()
ldstr "MyStruct263::Method0.MI.2024()"
ldstr "IBase0 on type valuetype MyStruct263`2<class BaseClass0,class BaseClass1>"
call void [TestFramework]TestFramework::MethodCallTest(string,string,string)
dup
callvirt instance string IBase0::Method1()
ldstr "MyStruct263::Method1.MI.2026()"
ldstr "IBase0 on type valuetype MyStruct263`2<class BaseClass0,class BaseClass1>"
call void [TestFramework]TestFramework::MethodCallTest(string,string,string)
dup
callvirt instance string IBase0::Method2<object>()
ldstr "MyStruct263::Method2.2027<System.Object>()"
ldstr "IBase0 on type valuetype MyStruct263`2<class BaseClass0,class BaseClass1>"
call void [TestFramework]TestFramework::MethodCallTest(string,string,string)
dup
callvirt instance string IBase0::Method3<object>()
ldstr "MyStruct263::Method3.2028<System.Object>()"
ldstr "IBase0 on type valuetype MyStruct263`2<class BaseClass0,class BaseClass1>"
call void [TestFramework]TestFramework::MethodCallTest(string,string,string)
pop
ldloc V_2
box valuetype MyStruct263`2<class BaseClass0,class BaseClass1>
dup
callvirt instance string class IBase1`1<class BaseClass0>::Method4()
ldstr "MyStruct263::Method4.MI.2019()"
ldstr "class IBase1`1<class BaseClass0> on type valuetype MyStruct263`2<class BaseClass0,class BaseClass1>"
call void [TestFramework]TestFramework::MethodCallTest(string,string,string)
dup
callvirt instance string class IBase1`1<class BaseClass0>::Method5()
ldstr "MyStruct263::Method5.2020()"
ldstr "class IBase1`1<class BaseClass0> on type valuetype MyStruct263`2<class BaseClass0,class BaseClass1>"
call void [TestFramework]TestFramework::MethodCallTest(string,string,string)
dup
callvirt instance string class IBase1`1<class BaseClass0>::Method6<object>()
ldstr "MyStruct263::Method6.MI.2022<System.Object>()"
ldstr "class IBase1`1<class BaseClass0> on type valuetype MyStruct263`2<class BaseClass0,class BaseClass1>"
call void [TestFramework]TestFramework::MethodCallTest(string,string,string)
pop
.locals init (valuetype MyStruct263`2<class BaseClass1,class BaseClass0> V_3)
ldloca V_3
initobj valuetype MyStruct263`2<class BaseClass1,class BaseClass0>
ldloca V_3
dup
call instance string valuetype MyStruct263`2<class BaseClass1,class BaseClass0>::Method4()
ldstr "MyStruct263::Method4.2018()"
ldstr "valuetype MyStruct263`2<class BaseClass1,class BaseClass0> on type MyStruct263"
call void [TestFramework]TestFramework::MethodCallTest(string,string,string)
dup
call instance string valuetype MyStruct263`2<class BaseClass1,class BaseClass0>::Method5()
ldstr "MyStruct263::Method5.2020()"
ldstr "valuetype MyStruct263`2<class BaseClass1,class BaseClass0> on type MyStruct263"
call void [TestFramework]TestFramework::MethodCallTest(string,string,string)
dup
call instance string valuetype MyStruct263`2<class BaseClass1,class BaseClass0>::Method6<object>()
ldstr "MyStruct263::Method6.2021<System.Object>()"
ldstr "valuetype MyStruct263`2<class BaseClass1,class BaseClass0> on type MyStruct263"
call void [TestFramework]TestFramework::MethodCallTest(string,string,string)
dup
call instance string valuetype MyStruct263`2<class BaseClass1,class BaseClass0>::Method0()
ldstr "MyStruct263::Method0.2023()"
ldstr "valuetype MyStruct263`2<class BaseClass1,class BaseClass0> on type MyStruct263"
call void [TestFramework]TestFramework::MethodCallTest(string,string,string)
dup
call instance string valuetype MyStruct263`2<class BaseClass1,class BaseClass0>::Method1()
ldstr "MyStruct263::Method1.2025()"
ldstr "valuetype MyStruct263`2<class BaseClass1,class BaseClass0> on type MyStruct263"
call void [TestFramework]TestFramework::MethodCallTest(string,string,string)
dup
call instance string valuetype MyStruct263`2<class BaseClass1,class BaseClass0>::Method2<object>()
ldstr "MyStruct263::Method2.2027<System.Object>()"
ldstr "valuetype MyStruct263`2<class BaseClass1,class BaseClass0> on type MyStruct263"
call void [TestFramework]TestFramework::MethodCallTest(string,string,string)
dup
call instance string valuetype MyStruct263`2<class BaseClass1,class BaseClass0>::Method3<object>()
ldstr "MyStruct263::Method3.2028<System.Object>()"
ldstr "valuetype MyStruct263`2<class BaseClass1,class BaseClass0> on type MyStruct263"
call void [TestFramework]TestFramework::MethodCallTest(string,string,string)
dup
call instance string valuetype MyStruct263`2<class BaseClass1,class BaseClass0>::ClassMethod533()
ldstr "MyStruct263::ClassMethod533.2029()"
ldstr "valuetype MyStruct263`2<class BaseClass1,class BaseClass0> on type MyStruct263"
call void [TestFramework]TestFramework::MethodCallTest(string,string,string)
dup ldnull call instance bool valuetype MyStruct263`2<class BaseClass1,class BaseClass0>::Equals(object) pop
dup call instance int32 valuetype MyStruct263`2<class BaseClass1,class BaseClass0>::GetHashCode() pop
dup call instance string valuetype MyStruct263`2<class BaseClass1,class BaseClass0>::ToString() pop
pop
ldloc V_3
box valuetype MyStruct263`2<class BaseClass1,class BaseClass0>
dup
callvirt instance string class IBase1`1<class BaseClass1>::Method4()
ldstr "MyStruct263::Method4.MI.2019()"
ldstr "class IBase1`1<class BaseClass1> on type valuetype MyStruct263`2<class BaseClass1,class BaseClass0>"
call void [TestFramework]TestFramework::MethodCallTest(string,string,string)
dup
callvirt instance string class IBase1`1<class BaseClass1>::Method5()
ldstr "MyStruct263::Method5.2020()"
ldstr "class IBase1`1<class BaseClass1> on type valuetype MyStruct263`2<class BaseClass1,class BaseClass0>"
call void [TestFramework]TestFramework::MethodCallTest(string,string,string)
dup
callvirt instance string class IBase1`1<class BaseClass1>::Method6<object>()
ldstr "MyStruct263::Method6.MI.2022<System.Object>()"
ldstr "class IBase1`1<class BaseClass1> on type valuetype MyStruct263`2<class BaseClass1,class BaseClass0>"
call void [TestFramework]TestFramework::MethodCallTest(string,string,string)
pop
ldloc V_3
box valuetype MyStruct263`2<class BaseClass1,class BaseClass0>
dup
callvirt instance string IBase0::Method0()
ldstr "MyStruct263::Method0.MI.2024()"
ldstr "IBase0 on type valuetype MyStruct263`2<class BaseClass1,class BaseClass0>"
call void [TestFramework]TestFramework::MethodCallTest(string,string,string)
dup
callvirt instance string IBase0::Method1()
ldstr "MyStruct263::Method1.MI.2026()"
ldstr "IBase0 on type valuetype MyStruct263`2<class BaseClass1,class BaseClass0>"
call void [TestFramework]TestFramework::MethodCallTest(string,string,string)
dup
callvirt instance string IBase0::Method2<object>()
ldstr "MyStruct263::Method2.2027<System.Object>()"
ldstr "IBase0 on type valuetype MyStruct263`2<class BaseClass1,class BaseClass0>"
call void [TestFramework]TestFramework::MethodCallTest(string,string,string)
dup
callvirt instance string IBase0::Method3<object>()
ldstr "MyStruct263::Method3.2028<System.Object>()"
ldstr "IBase0 on type valuetype MyStruct263`2<class BaseClass1,class BaseClass0>"
call void [TestFramework]TestFramework::MethodCallTest(string,string,string)
pop
ldloc V_3
box valuetype MyStruct263`2<class BaseClass1,class BaseClass0>
dup
callvirt instance string class IBase1`1<class BaseClass0>::Method4()
ldstr "MyStruct263::Method4.MI.2019()"
ldstr "class IBase1`1<class BaseClass0> on type valuetype MyStruct263`2<class BaseClass1,class BaseClass0>"
call void [TestFramework]TestFramework::MethodCallTest(string,string,string)
dup
callvirt instance string class IBase1`1<class BaseClass0>::Method5()
ldstr "MyStruct263::Method5.2020()"
ldstr "class IBase1`1<class BaseClass0> on type valuetype MyStruct263`2<class BaseClass1,class BaseClass0>"
call void [TestFramework]TestFramework::MethodCallTest(string,string,string)
dup
callvirt instance string class IBase1`1<class BaseClass0>::Method6<object>()
ldstr "MyStruct263::Method6.MI.2022<System.Object>()"
ldstr "class IBase1`1<class BaseClass0> on type valuetype MyStruct263`2<class BaseClass1,class BaseClass0>"
call void [TestFramework]TestFramework::MethodCallTest(string,string,string)
pop
.locals init (valuetype MyStruct263`2<class BaseClass1,class BaseClass1> V_4)
ldloca V_4
initobj valuetype MyStruct263`2<class BaseClass1,class BaseClass1>
ldloca V_4
dup
call instance string valuetype MyStruct263`2<class BaseClass1,class BaseClass1>::Method4()
ldstr "MyStruct263::Method4.2018()"
ldstr "valuetype MyStruct263`2<class BaseClass1,class BaseClass1> on type MyStruct263"
call void [TestFramework]TestFramework::MethodCallTest(string,string,string)
dup
call instance string valuetype MyStruct263`2<class BaseClass1,class BaseClass1>::Method5()
ldstr "MyStruct263::Method5.2020()"
ldstr "valuetype MyStruct263`2<class BaseClass1,class BaseClass1> on type MyStruct263"
call void [TestFramework]TestFramework::MethodCallTest(string,string,string)
dup
call instance string valuetype MyStruct263`2<class BaseClass1,class BaseClass1>::Method6<object>()
ldstr "MyStruct263::Method6.2021<System.Object>()"
ldstr "valuetype MyStruct263`2<class BaseClass1,class BaseClass1> on type MyStruct263"
call void [TestFramework]TestFramework::MethodCallTest(string,string,string)
dup
call instance string valuetype MyStruct263`2<class BaseClass1,class BaseClass1>::Method0()
ldstr "MyStruct263::Method0.2023()"
ldstr "valuetype MyStruct263`2<class BaseClass1,class BaseClass1> on type MyStruct263"
call void [TestFramework]TestFramework::MethodCallTest(string,string,string)
dup
call instance string valuetype MyStruct263`2<class BaseClass1,class BaseClass1>::Method1()
ldstr "MyStruct263::Method1.2025()"
ldstr "valuetype MyStruct263`2<class BaseClass1,class BaseClass1> on type MyStruct263"
call void [TestFramework]TestFramework::MethodCallTest(string,string,string)
dup
call instance string valuetype MyStruct263`2<class BaseClass1,class BaseClass1>::Method2<object>()
ldstr "MyStruct263::Method2.2027<System.Object>()"
ldstr "valuetype MyStruct263`2<class BaseClass1,class BaseClass1> on type MyStruct263"
call void [TestFramework]TestFramework::MethodCallTest(string,string,string)
dup
call instance string valuetype MyStruct263`2<class BaseClass1,class BaseClass1>::Method3<object>()
ldstr "MyStruct263::Method3.2028<System.Object>()"
ldstr "valuetype MyStruct263`2<class BaseClass1,class BaseClass1> on type MyStruct263"
call void [TestFramework]TestFramework::MethodCallTest(string,string,string)
dup
call instance string valuetype MyStruct263`2<class BaseClass1,class BaseClass1>::ClassMethod533()
ldstr "MyStruct263::ClassMethod533.2029()"
ldstr "valuetype MyStruct263`2<class BaseClass1,class BaseClass1> on type MyStruct263"
call void [TestFramework]TestFramework::MethodCallTest(string,string,string)
dup ldnull call instance bool valuetype MyStruct263`2<class BaseClass1,class BaseClass1>::Equals(object) pop
dup call instance int32 valuetype MyStruct263`2<class BaseClass1,class BaseClass1>::GetHashCode() pop
dup call instance string valuetype MyStruct263`2<class BaseClass1,class BaseClass1>::ToString() pop
pop
ldloc V_4
box valuetype MyStruct263`2<class BaseClass1,class BaseClass1>
dup
callvirt instance string class IBase1`1<class BaseClass1>::Method4()
ldstr "MyStruct263::Method4.MI.2019()"
ldstr "class IBase1`1<class BaseClass1> on type valuetype MyStruct263`2<class BaseClass1,class BaseClass1>"
call void [TestFramework]TestFramework::MethodCallTest(string,string,string)
dup
callvirt instance string class IBase1`1<class BaseClass1>::Method5()
ldstr "MyStruct263::Method5.2020()"
ldstr "class IBase1`1<class BaseClass1> on type valuetype MyStruct263`2<class BaseClass1,class BaseClass1>"
call void [TestFramework]TestFramework::MethodCallTest(string,string,string)
dup
callvirt instance string class IBase1`1<class BaseClass1>::Method6<object>()
ldstr "MyStruct263::Method6.MI.2022<System.Object>()"
ldstr "class IBase1`1<class BaseClass1> on type valuetype MyStruct263`2<class BaseClass1,class BaseClass1>"
call void [TestFramework]TestFramework::MethodCallTest(string,string,string)
pop
ldloc V_4
box valuetype MyStruct263`2<class BaseClass1,class BaseClass1>
dup
callvirt instance string IBase0::Method0()
ldstr "MyStruct263::Method0.MI.2024()"
ldstr "IBase0 on type valuetype MyStruct263`2<class BaseClass1,class BaseClass1>"
call void [TestFramework]TestFramework::MethodCallTest(string,string,string)
dup
callvirt instance string IBase0::Method1()
ldstr "MyStruct263::Method1.MI.2026()"
ldstr "IBase0 on type valuetype MyStruct263`2<class BaseClass1,class BaseClass1>"
call void [TestFramework]TestFramework::MethodCallTest(string,string,string)
dup
callvirt instance string IBase0::Method2<object>()
ldstr "MyStruct263::Method2.2027<System.Object>()"
ldstr "IBase0 on type valuetype MyStruct263`2<class BaseClass1,class BaseClass1>"
call void [TestFramework]TestFramework::MethodCallTest(string,string,string)
dup
callvirt instance string IBase0::Method3<object>()
ldstr "MyStruct263::Method3.2028<System.Object>()"
ldstr "IBase0 on type valuetype MyStruct263`2<class BaseClass1,class BaseClass1>"
call void [TestFramework]TestFramework::MethodCallTest(string,string,string)
pop
ldloc V_4
box valuetype MyStruct263`2<class BaseClass1,class BaseClass1>
dup
callvirt instance string class IBase1`1<class BaseClass0>::Method4()
ldstr "MyStruct263::Method4.MI.2019()"
ldstr "class IBase1`1<class BaseClass0> on type valuetype MyStruct263`2<class BaseClass1,class BaseClass1>"
call void [TestFramework]TestFramework::MethodCallTest(string,string,string)
dup
callvirt instance string class IBase1`1<class BaseClass0>::Method5()
ldstr "MyStruct263::Method5.2020()"
ldstr "class IBase1`1<class BaseClass0> on type valuetype MyStruct263`2<class BaseClass1,class BaseClass1>"
call void [TestFramework]TestFramework::MethodCallTest(string,string,string)
dup
callvirt instance string class IBase1`1<class BaseClass0>::Method6<object>()
ldstr "MyStruct263::Method6.MI.2022<System.Object>()"
ldstr "class IBase1`1<class BaseClass0> on type valuetype MyStruct263`2<class BaseClass1,class BaseClass1>"
call void [TestFramework]TestFramework::MethodCallTest(string,string,string)
pop
ldstr "========================================================================\n\n"
call void [mscorlib]System.Console::WriteLine(string)
ret
}
.method public hidebysig static void ConstrainedCallsTest() cil managed
{
.maxstack 10
.locals init (object V_0)
ldstr "========================== Constrained Calls Test =========================="
call void [mscorlib]System.Console::WriteLine(string)
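// Each .try block below zero-initializes a MyStruct263`2 local, pushes the expected '#'-separated call log,
// and hands both to a Generated213::M.* helper; System.Security.VerificationException is swallowed so a
// verification failure does not abort the remaining checks.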
.locals init (valuetype MyStruct263`2<class BaseClass0,class BaseClass0> V_5)
ldloca V_5
initobj valuetype MyStruct263`2<class BaseClass0,class BaseClass0>
.try { ldloc V_5
ldstr "MyStruct263::Method4.MI.2019()#MyStruct263::Method5.2020()#MyStruct263::Method6.MI.2022<System.Object>()#"
call void Generated213::M.IBase1.T<class BaseClass1,valuetype MyStruct263`2<class BaseClass0,class BaseClass0>>(!!1,string) leave.s LV0
} catch [mscorlib]System.Security.VerificationException { pop leave.s LV0} LV0:
.try { ldloc V_5
ldstr "MyStruct263::Method4.MI.2019()#MyStruct263::Method5.2020()#MyStruct263::Method6.MI.2022<System.Object>()#"
call void Generated213::M.IBase1.B<valuetype MyStruct263`2<class BaseClass0,class BaseClass0>>(!!0,string) leave.s LV1
} catch [mscorlib]System.Security.VerificationException { pop leave.s LV1} LV1:
.try { ldloc V_5
ldstr "MyStruct263::Method0.MI.2024()#MyStruct263::Method1.MI.2026()#MyStruct263::Method2.2027<System.Object>()#MyStruct263::Method3.2028<System.Object>()#"
call void Generated213::M.IBase0<valuetype MyStruct263`2<class BaseClass0,class BaseClass0>>(!!0,string) leave.s LV2
} catch [mscorlib]System.Security.VerificationException { pop leave.s LV2} LV2:
.try { ldloc V_5
ldstr "MyStruct263::Method4.MI.2019()#MyStruct263::Method5.2020()#MyStruct263::Method6.MI.2022<System.Object>()#"
call void Generated213::M.IBase1.T<class BaseClass0,valuetype MyStruct263`2<class BaseClass0,class BaseClass0>>(!!1,string) leave.s LV3
} catch [mscorlib]System.Security.VerificationException { pop leave.s LV3} LV3:
.try { ldloc V_5
ldstr "MyStruct263::Method4.MI.2019()#MyStruct263::Method5.2020()#MyStruct263::Method6.MI.2022<System.Object>()#"
call void Generated213::M.IBase1.A<valuetype MyStruct263`2<class BaseClass0,class BaseClass0>>(!!0,string) leave.s LV4
} catch [mscorlib]System.Security.VerificationException { pop leave.s LV4} LV4:
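// Same helper calls for MyStruct263`2<class BaseClass0,class BaseClass1>.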
.locals init (valuetype MyStruct263`2<class BaseClass0,class BaseClass1> V_6)
ldloca V_6
initobj valuetype MyStruct263`2<class BaseClass0,class BaseClass1>
.try { ldloc V_6
ldstr "MyStruct263::Method4.MI.2019()#MyStruct263::Method5.2020()#MyStruct263::Method6.MI.2022<System.Object>()#"
call void Generated213::M.IBase1.T<class BaseClass1,valuetype MyStruct263`2<class BaseClass0,class BaseClass1>>(!!1,string) leave.s LV5
} catch [mscorlib]System.Security.VerificationException { pop leave.s LV5} LV5:
.try { ldloc V_6
ldstr "MyStruct263::Method4.MI.2019()#MyStruct263::Method5.2020()#MyStruct263::Method6.MI.2022<System.Object>()#"
call void Generated213::M.IBase1.B<valuetype MyStruct263`2<class BaseClass0,class BaseClass1>>(!!0,string) leave.s LV6
} catch [mscorlib]System.Security.VerificationException { pop leave.s LV6} LV6:
.try { ldloc V_6
ldstr "MyStruct263::Method0.MI.2024()#MyStruct263::Method1.MI.2026()#MyStruct263::Method2.2027<System.Object>()#MyStruct263::Method3.2028<System.Object>()#"
call void Generated213::M.IBase0<valuetype MyStruct263`2<class BaseClass0,class BaseClass1>>(!!0,string) leave.s LV7
} catch [mscorlib]System.Security.VerificationException { pop leave.s LV7} LV7:
.try { ldloc V_6
ldstr "MyStruct263::Method4.MI.2019()#MyStruct263::Method5.2020()#MyStruct263::Method6.MI.2022<System.Object>()#"
call void Generated213::M.IBase1.T<class BaseClass0,valuetype MyStruct263`2<class BaseClass0,class BaseClass1>>(!!1,string) leave.s LV8
} catch [mscorlib]System.Security.VerificationException { pop leave.s LV8} LV8:
.try { ldloc V_6
ldstr "MyStruct263::Method4.MI.2019()#MyStruct263::Method5.2020()#MyStruct263::Method6.MI.2022<System.Object>()#"
call void Generated213::M.IBase1.A<valuetype MyStruct263`2<class BaseClass0,class BaseClass1>>(!!0,string) leave.s LV9
} catch [mscorlib]System.Security.VerificationException { pop leave.s LV9} LV9:
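// Same helper calls for MyStruct263`2<class BaseClass1,class BaseClass0>.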
.locals init (valuetype MyStruct263`2<class BaseClass1,class BaseClass0> V_7)
ldloca V_7
initobj valuetype MyStruct263`2<class BaseClass1,class BaseClass0>
.try { ldloc V_7
ldstr "MyStruct263::Method4.MI.2019()#MyStruct263::Method5.2020()#MyStruct263::Method6.MI.2022<System.Object>()#"
call void Generated213::M.IBase1.T<class BaseClass1,valuetype MyStruct263`2<class BaseClass1,class BaseClass0>>(!!1,string) leave.s LV10
} catch [mscorlib]System.Security.VerificationException { pop leave.s LV10} LV10:
.try { ldloc V_7
ldstr "MyStruct263::Method4.MI.2019()#MyStruct263::Method5.2020()#MyStruct263::Method6.MI.2022<System.Object>()#"
call void Generated213::M.IBase1.B<valuetype MyStruct263`2<class BaseClass1,class BaseClass0>>(!!0,string) leave.s LV11
} catch [mscorlib]System.Security.VerificationException { pop leave.s LV11} LV11:
.try { ldloc V_7
ldstr "MyStruct263::Method0.MI.2024()#MyStruct263::Method1.MI.2026()#MyStruct263::Method2.2027<System.Object>()#MyStruct263::Method3.2028<System.Object>()#"
call void Generated213::M.IBase0<valuetype MyStruct263`2<class BaseClass1,class BaseClass0>>(!!0,string) leave.s LV12
} catch [mscorlib]System.Security.VerificationException { pop leave.s LV12} LV12:
.try { ldloc V_7
ldstr "MyStruct263::Method4.MI.2019()#MyStruct263::Method5.2020()#MyStruct263::Method6.MI.2022<System.Object>()#"
call void Generated213::M.IBase1.T<class BaseClass0,valuetype MyStruct263`2<class BaseClass1,class BaseClass0>>(!!1,string) leave.s LV13
} catch [mscorlib]System.Security.VerificationException { pop leave.s LV13} LV13:
.try { ldloc V_7
ldstr "MyStruct263::Method4.MI.2019()#MyStruct263::Method5.2020()#MyStruct263::Method6.MI.2022<System.Object>()#"
call void Generated213::M.IBase1.A<valuetype MyStruct263`2<class BaseClass1,class BaseClass0>>(!!0,string) leave.s LV14
} catch [mscorlib]System.Security.VerificationException { pop leave.s LV14} LV14:
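// Same helper calls for MyStruct263`2<class BaseClass1,class BaseClass1>.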
.locals init (valuetype MyStruct263`2<class BaseClass1,class BaseClass1> V_8)
ldloca V_8
initobj valuetype MyStruct263`2<class BaseClass1,class BaseClass1>
.try { ldloc V_8
ldstr "MyStruct263::Method4.MI.2019()#MyStruct263::Method5.2020()#MyStruct263::Method6.MI.2022<System.Object>()#"
call void Generated213::M.IBase1.T<class BaseClass1,valuetype MyStruct263`2<class BaseClass1,class BaseClass1>>(!!1,string) leave.s LV15
} catch [mscorlib]System.Security.VerificationException { pop leave.s LV15} LV15:
.try { ldloc V_8
ldstr "MyStruct263::Method4.MI.2019()#MyStruct263::Method5.2020()#MyStruct263::Method6.MI.2022<System.Object>()#"
call void Generated213::M.IBase1.B<valuetype MyStruct263`2<class BaseClass1,class BaseClass1>>(!!0,string) leave.s LV16
} catch [mscorlib]System.Security.VerificationException { pop leave.s LV16} LV16:
.try { ldloc V_8
ldstr "MyStruct263::Method0.MI.2024()#MyStruct263::Method1.MI.2026()#MyStruct263::Method2.2027<System.Object>()#MyStruct263::Method3.2028<System.Object>()#"
call void Generated213::M.IBase0<valuetype MyStruct263`2<class BaseClass1,class BaseClass1>>(!!0,string) leave.s LV17
} catch [mscorlib]System.Security.VerificationException { pop leave.s LV17} LV17:
.try { ldloc V_8
ldstr "MyStruct263::Method4.MI.2019()#MyStruct263::Method5.2020()#MyStruct263::Method6.MI.2022<System.Object>()#"
call void Generated213::M.IBase1.T<class BaseClass0,valuetype MyStruct263`2<class BaseClass1,class BaseClass1>>(!!1,string) leave.s LV18
} catch [mscorlib]System.Security.VerificationException { pop leave.s LV18} LV18:
.try { ldloc V_8
ldstr "MyStruct263::Method4.MI.2019()#MyStruct263::Method5.2020()#MyStruct263::Method6.MI.2022<System.Object>()#"
call void Generated213::M.IBase1.A<valuetype MyStruct263`2<class BaseClass1,class BaseClass1>>(!!0,string) leave.s LV19
} catch [mscorlib]System.Security.VerificationException { pop leave.s LV19} LV19:
ldstr "========================================================================\n\n"
call void [mscorlib]System.Console::WriteLine(string)
ret
}
.method public hidebysig static void StructConstrainedInterfaceCallsTest() cil managed
{
.maxstack 10
ldstr "===================== Struct Constrained Interface Calls Test ====================="
call void [mscorlib]System.Console::WriteLine(string)
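// Each block passes a zero-initialized MyStruct263`2 local together with the full expected call log
// (Method4..Method6 results followed by Method0..Method3 results) to a Generated213::M.MyStruct263.* helper;
// VerificationException is swallowed as in ConstrainedCallsTest.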
.locals init (valuetype MyStruct263`2<class BaseClass0,class BaseClass0> V_9)
ldloca V_9
initobj valuetype MyStruct263`2<class BaseClass0,class BaseClass0>
.try { ldloc V_9
ldstr "MyStruct263::Method4.MI.2019()#" +
"MyStruct263::Method5.2020()#" +
"MyStruct263::Method6.MI.2022<System.Object>()#" +
"MyStruct263::Method0.MI.2024()#" +
"MyStruct263::Method1.MI.2026()#" +
"MyStruct263::Method2.2027<System.Object>()#" +
"MyStruct263::Method3.2028<System.Object>()#"
call void Generated213::M.MyStruct263.T.T<class BaseClass0,class BaseClass0,valuetype MyStruct263`2<class BaseClass0,class BaseClass0>>(!!2,string) leave.s LV0
} catch [mscorlib]System.Security.VerificationException { pop leave.s LV0} LV0:
.try { ldloc V_9
ldstr "MyStruct263::Method4.MI.2019()#" +
"MyStruct263::Method5.2020()#" +
"MyStruct263::Method6.MI.2022<System.Object>()#" +
"MyStruct263::Method0.MI.2024()#" +
"MyStruct263::Method1.MI.2026()#" +
"MyStruct263::Method2.2027<System.Object>()#" +
"MyStruct263::Method3.2028<System.Object>()#"
call void Generated213::M.MyStruct263.A.T<class BaseClass0,valuetype MyStruct263`2<class BaseClass0,class BaseClass0>>(!!1,string) leave.s LV1
} catch [mscorlib]System.Security.VerificationException { pop leave.s LV1} LV1:
.try { ldloc V_9
ldstr "MyStruct263::Method4.MI.2019()#" +
"MyStruct263::Method5.2020()#" +
"MyStruct263::Method6.MI.2022<System.Object>()#" +
"MyStruct263::Method0.MI.2024()#" +
"MyStruct263::Method1.MI.2026()#" +
"MyStruct263::Method2.2027<System.Object>()#" +
"MyStruct263::Method3.2028<System.Object>()#"
call void Generated213::M.MyStruct263.A.A<valuetype MyStruct263`2<class BaseClass0,class BaseClass0>>(!!0,string) leave.s LV2
} catch [mscorlib]System.Security.VerificationException { pop leave.s LV2} LV2:
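// Same pattern for MyStruct263`2<class BaseClass0,class BaseClass1>.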
.locals init (valuetype MyStruct263`2<class BaseClass0,class BaseClass1> V_10)
ldloca V_10
initobj valuetype MyStruct263`2<class BaseClass0,class BaseClass1>
.try { ldloc V_10
ldstr "MyStruct263::Method4.MI.2019()#" +
"MyStruct263::Method5.2020()#" +
"MyStruct263::Method6.MI.2022<System.Object>()#" +
"MyStruct263::Method0.MI.2024()#" +
"MyStruct263::Method1.MI.2026()#" +
"MyStruct263::Method2.2027<System.Object>()#" +
"MyStruct263::Method3.2028<System.Object>()#"
call void Generated213::M.MyStruct263.T.T<class BaseClass0,class BaseClass1,valuetype MyStruct263`2<class BaseClass0,class BaseClass1>>(!!2,string) leave.s LV3
} catch [mscorlib]System.Security.VerificationException { pop leave.s LV3} LV3:
.try { ldloc V_10
ldstr "MyStruct263::Method4.MI.2019()#" +
"MyStruct263::Method5.2020()#" +
"MyStruct263::Method6.MI.2022<System.Object>()#" +
"MyStruct263::Method0.MI.2024()#" +
"MyStruct263::Method1.MI.2026()#" +
"MyStruct263::Method2.2027<System.Object>()#" +
"MyStruct263::Method3.2028<System.Object>()#"
call void Generated213::M.MyStruct263.A.T<class BaseClass1,valuetype MyStruct263`2<class BaseClass0,class BaseClass1>>(!!1,string) leave.s LV4
} catch [mscorlib]System.Security.VerificationException { pop leave.s LV4} LV4:
.try { ldloc V_10
ldstr "MyStruct263::Method4.MI.2019()#" +
"MyStruct263::Method5.2020()#" +
"MyStruct263::Method6.MI.2022<System.Object>()#" +
"MyStruct263::Method0.MI.2024()#" +
"MyStruct263::Method1.MI.2026()#" +
"MyStruct263::Method2.2027<System.Object>()#" +
"MyStruct263::Method3.2028<System.Object>()#"
call void Generated213::M.MyStruct263.A.B<valuetype MyStruct263`2<class BaseClass0,class BaseClass1>>(!!0,string) leave.s LV5
} catch [mscorlib]System.Security.VerificationException { pop leave.s LV5} LV5:
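// Same pattern for MyStruct263`2<class BaseClass1,class BaseClass0>.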
.locals init (valuetype MyStruct263`2<class BaseClass1,class BaseClass0> V_11)
ldloca V_11
initobj valuetype MyStruct263`2<class BaseClass1,class BaseClass0>
.try { ldloc V_11
ldstr "MyStruct263::Method4.MI.2019()#" +
"MyStruct263::Method5.2020()#" +
"MyStruct263::Method6.MI.2022<System.Object>()#" +
"MyStruct263::Method0.MI.2024()#" +
"MyStruct263::Method1.MI.2026()#" +
"MyStruct263::Method2.2027<System.Object>()#" +
"MyStruct263::Method3.2028<System.Object>()#"
call void Generated213::M.MyStruct263.T.T<class BaseClass1,class BaseClass0,valuetype MyStruct263`2<class BaseClass1,class BaseClass0>>(!!2,string) leave.s LV6
} catch [mscorlib]System.Security.VerificationException { pop leave.s LV6} LV6:
.try { ldloc V_11
ldstr "MyStruct263::Method4.MI.2019()#" +
"MyStruct263::Method5.2020()#" +
"MyStruct263::Method6.MI.2022<System.Object>()#" +
"MyStruct263::Method0.MI.2024()#" +
"MyStruct263::Method1.MI.2026()#" +
"MyStruct263::Method2.2027<System.Object>()#" +
"MyStruct263::Method3.2028<System.Object>()#"
call void Generated213::M.MyStruct263.B.T<class BaseClass0,valuetype MyStruct263`2<class BaseClass1,class BaseClass0>>(!!1,string) leave.s LV7
} catch [mscorlib]System.Security.VerificationException { pop leave.s LV7} LV7:
.try { ldloc V_11
ldstr "MyStruct263::Method4.MI.2019()#" +
"MyStruct263::Method5.2020()#" +
"MyStruct263::Method6.MI.2022<System.Object>()#" +
"MyStruct263::Method0.MI.2024()#" +
"MyStruct263::Method1.MI.2026()#" +
"MyStruct263::Method2.2027<System.Object>()#" +
"MyStruct263::Method3.2028<System.Object>()#"
call void Generated213::M.MyStruct263.B.A<valuetype MyStruct263`2<class BaseClass1,class BaseClass0>>(!!0,string) leave.s LV8
} catch [mscorlib]System.Security.VerificationException { pop leave.s LV8} LV8:
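// Same pattern for MyStruct263`2<class BaseClass1,class BaseClass1>.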
.locals init (valuetype MyStruct263`2<class BaseClass1,class BaseClass1> V_12)
ldloca V_12
initobj valuetype MyStruct263`2<class BaseClass1,class BaseClass1>
.try { ldloc V_12
ldstr "MyStruct263::Method4.MI.2019()#" +
"MyStruct263::Method5.2020()#" +
"MyStruct263::Method6.MI.2022<System.Object>()#" +
"MyStruct263::Method0.MI.2024()#" +
"MyStruct263::Method1.MI.2026()#" +
"MyStruct263::Method2.2027<System.Object>()#" +
"MyStruct263::Method3.2028<System.Object>()#"
call void Generated213::M.MyStruct263.T.T<class BaseClass1,class BaseClass1,valuetype MyStruct263`2<class BaseClass1,class BaseClass1>>(!!2,string) leave.s LV9
} catch [mscorlib]System.Security.VerificationException { pop leave.s LV9} LV9:
.try { ldloc V_12
ldstr "MyStruct263::Method4.MI.2019()#" +
"MyStruct263::Method5.2020()#" +
"MyStruct263::Method6.MI.2022<System.Object>()#" +
"MyStruct263::Method0.MI.2024()#" +
"MyStruct263::Method1.MI.2026()#" +
"MyStruct263::Method2.2027<System.Object>()#" +
"MyStruct263::Method3.2028<System.Object>()#"
call void Generated213::M.MyStruct263.B.T<class BaseClass1,valuetype MyStruct263`2<class BaseClass1,class BaseClass1>>(!!1,string) leave.s LV10
} catch [mscorlib]System.Security.VerificationException { pop leave.s LV10} LV10:
.try { ldloc V_12
ldstr "MyStruct263::Method4.MI.2019()#" +
"MyStruct263::Method5.2020()#" +
"MyStruct263::Method6.MI.2022<System.Object>()#" +
"MyStruct263::Method0.MI.2024()#" +
"MyStruct263::Method1.MI.2026()#" +
"MyStruct263::Method2.2027<System.Object>()#" +
"MyStruct263::Method3.2028<System.Object>()#"
call void Generated213::M.MyStruct263.B.B<valuetype MyStruct263`2<class BaseClass1,class BaseClass1>>(!!0,string) leave.s LV11
} catch [mscorlib]System.Security.VerificationException { pop leave.s LV11} LV11:
ldstr "========================================================================\n\n"
call void [mscorlib]System.Console::WriteLine(string)
ret
}
.method public hidebysig static void CalliTest() cil managed
{
.maxstack 10
.locals init (object V_0)
ldstr "========================== Method Calli Test =========================="
call void [mscorlib]System.Console::WriteLine(string)
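// Pattern for every check below: box the struct once as the 'this' argument, box it again so ldvirtftn can
// resolve the target's function pointer, invoke that pointer with calli (string(object), or bool(object,object)
// and int32(object) for Equals/GetHashCode), and verify the returned string with TestFramework::MethodCallTest;
// the Equals/GetHashCode/ToString results are popped without verification.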
.locals init (valuetype MyStruct263`2<class BaseClass0,class BaseClass0> V_13)
ldloca V_13
initobj valuetype MyStruct263`2<class BaseClass0,class BaseClass0>
ldloc V_13
box valuetype MyStruct263`2<class BaseClass0,class BaseClass0>
ldloc V_13
box valuetype MyStruct263`2<class BaseClass0,class BaseClass0>
ldvirtftn instance string valuetype MyStruct263`2<class BaseClass0,class BaseClass0>::Method4()
calli default string(object)
ldstr "MyStruct263::Method4.2018()"
ldstr "valuetype MyStruct263`2<class BaseClass0,class BaseClass0> on type valuetype MyStruct263`2<class BaseClass0,class BaseClass0>"
call void [TestFramework]TestFramework::MethodCallTest(string,string,string)
ldloc V_13
box valuetype MyStruct263`2<class BaseClass0,class BaseClass0>
ldloc V_13
box valuetype MyStruct263`2<class BaseClass0,class BaseClass0>
ldvirtftn instance string valuetype MyStruct263`2<class BaseClass0,class BaseClass0>::Method5()
calli default string(object)
ldstr "MyStruct263::Method5.2020()"
ldstr "valuetype MyStruct263`2<class BaseClass0,class BaseClass0> on type valuetype MyStruct263`2<class BaseClass0,class BaseClass0>"
call void [TestFramework]TestFramework::MethodCallTest(string,string,string)
ldloc V_13
box valuetype MyStruct263`2<class BaseClass0,class BaseClass0>
ldloc V_13
box valuetype MyStruct263`2<class BaseClass0,class BaseClass0>
ldvirtftn instance string valuetype MyStruct263`2<class BaseClass0,class BaseClass0>::Method6<object>()
calli default string(object)
ldstr "MyStruct263::Method6.2021<System.Object>()"
ldstr "valuetype MyStruct263`2<class BaseClass0,class BaseClass0> on type valuetype MyStruct263`2<class BaseClass0,class BaseClass0>"
call void [TestFramework]TestFramework::MethodCallTest(string,string,string)
ldloc V_13
box valuetype MyStruct263`2<class BaseClass0,class BaseClass0>
ldloc V_13
box valuetype MyStruct263`2<class BaseClass0,class BaseClass0>
ldvirtftn instance string valuetype MyStruct263`2<class BaseClass0,class BaseClass0>::Method0()
calli default string(object)
ldstr "MyStruct263::Method0.2023()"
ldstr "valuetype MyStruct263`2<class BaseClass0,class BaseClass0> on type valuetype MyStruct263`2<class BaseClass0,class BaseClass0>"
call void [TestFramework]TestFramework::MethodCallTest(string,string,string)
ldloc V_13
box valuetype MyStruct263`2<class BaseClass0,class BaseClass0>
ldloc V_13
box valuetype MyStruct263`2<class BaseClass0,class BaseClass0>
ldvirtftn instance string valuetype MyStruct263`2<class BaseClass0,class BaseClass0>::Method1()
calli default string(object)
ldstr "MyStruct263::Method1.2025()"
ldstr "valuetype MyStruct263`2<class BaseClass0,class BaseClass0> on type valuetype MyStruct263`2<class BaseClass0,class BaseClass0>"
call void [TestFramework]TestFramework::MethodCallTest(string,string,string)
ldloc V_13
box valuetype MyStruct263`2<class BaseClass0,class BaseClass0>
ldloc V_13
box valuetype MyStruct263`2<class BaseClass0,class BaseClass0>
ldvirtftn instance string valuetype MyStruct263`2<class BaseClass0,class BaseClass0>::Method2<object>()
calli default string(object)
ldstr "MyStruct263::Method2.2027<System.Object>()"
ldstr "valuetype MyStruct263`2<class BaseClass0,class BaseClass0> on type valuetype MyStruct263`2<class BaseClass0,class BaseClass0>"
call void [TestFramework]TestFramework::MethodCallTest(string,string,string)
ldloc V_13
box valuetype MyStruct263`2<class BaseClass0,class BaseClass0>
ldloc V_13
box valuetype MyStruct263`2<class BaseClass0,class BaseClass0>
ldvirtftn instance string valuetype MyStruct263`2<class BaseClass0,class BaseClass0>::Method3<object>()
calli default string(object)
ldstr "MyStruct263::Method3.2028<System.Object>()"
ldstr "valuetype MyStruct263`2<class BaseClass0,class BaseClass0> on type valuetype MyStruct263`2<class BaseClass0,class BaseClass0>"
call void [TestFramework]TestFramework::MethodCallTest(string,string,string)
ldloc V_13
box valuetype MyStruct263`2<class BaseClass0,class BaseClass0>
ldloc V_13
box valuetype MyStruct263`2<class BaseClass0,class BaseClass0>
ldvirtftn instance string valuetype MyStruct263`2<class BaseClass0,class BaseClass0>::ClassMethod533()
calli default string(object)
ldstr "MyStruct263::ClassMethod533.2029()"
ldstr "valuetype MyStruct263`2<class BaseClass0,class BaseClass0> on type valuetype MyStruct263`2<class BaseClass0,class BaseClass0>"
call void [TestFramework]TestFramework::MethodCallTest(string,string,string)
ldloc V_13 box valuetype MyStruct263`2<class BaseClass0,class BaseClass0> ldnull
ldloc V_13 box valuetype MyStruct263`2<class BaseClass0,class BaseClass0>
ldvirtftn instance bool valuetype MyStruct263`2<class BaseClass0,class BaseClass0>::Equals(object) calli default bool(object,object) pop
ldloc V_13 box valuetype MyStruct263`2<class BaseClass0,class BaseClass0>
ldloc V_13 box valuetype MyStruct263`2<class BaseClass0,class BaseClass0>
ldvirtftn instance int32 valuetype MyStruct263`2<class BaseClass0,class BaseClass0>::GetHashCode() calli default int32(object) pop
ldloc V_13 box valuetype MyStruct263`2<class BaseClass0,class BaseClass0>
ldloc V_13 box valuetype MyStruct263`2<class BaseClass0,class BaseClass0>
ldvirtftn instance string valuetype MyStruct263`2<class BaseClass0,class BaseClass0>::ToString() calli default string(object) pop
ldloc V_13
box valuetype MyStruct263`2<class BaseClass0,class BaseClass0>
ldloc V_13
box valuetype MyStruct263`2<class BaseClass0,class BaseClass0>
ldvirtftn instance string class IBase1`1<class BaseClass1>::Method4()
calli default string(object)
ldstr "MyStruct263::Method4.MI.2019()"
ldstr "class IBase1`1<class BaseClass1> on type valuetype MyStruct263`2<class BaseClass0,class BaseClass0>"
call void [TestFramework]TestFramework::MethodCallTest(string,string,string)
ldloc V_13
box valuetype MyStruct263`2<class BaseClass0,class BaseClass0>
ldloc V_13
box valuetype MyStruct263`2<class BaseClass0,class BaseClass0>
ldvirtftn instance string class IBase1`1<class BaseClass1>::Method5()
calli default string(object)
ldstr "MyStruct263::Method5.2020()"
ldstr "class IBase1`1<class BaseClass1> on type valuetype MyStruct263`2<class BaseClass0,class BaseClass0>"
call void [TestFramework]TestFramework::MethodCallTest(string,string,string)
ldloc V_13
box valuetype MyStruct263`2<class BaseClass0,class BaseClass0>
ldloc V_13
box valuetype MyStruct263`2<class BaseClass0,class BaseClass0>
ldvirtftn instance string class IBase1`1<class BaseClass1>::Method6<object>()
calli default string(object)
ldstr "MyStruct263::Method6.MI.2022<System.Object>()"
ldstr "class IBase1`1<class BaseClass1> on type valuetype MyStruct263`2<class BaseClass0,class BaseClass0>"
call void [TestFramework]TestFramework::MethodCallTest(string,string,string)
ldloc V_13
box valuetype MyStruct263`2<class BaseClass0,class BaseClass0>
ldloc V_13
box valuetype MyStruct263`2<class BaseClass0,class BaseClass0>
ldvirtftn instance string IBase0::Method0()
calli default string(object)
ldstr "MyStruct263::Method0.MI.2024()"
ldstr "IBase0 on type valuetype MyStruct263`2<class BaseClass0,class BaseClass0>"
call void [TestFramework]TestFramework::MethodCallTest(string,string,string)
ldloc V_13
box valuetype MyStruct263`2<class BaseClass0,class BaseClass0>
ldloc V_13
box valuetype MyStruct263`2<class BaseClass0,class BaseClass0>
ldvirtftn instance string IBase0::Method1()
calli default string(object)
ldstr "MyStruct263::Method1.MI.2026()"
ldstr "IBase0 on type valuetype MyStruct263`2<class BaseClass0,class BaseClass0>"
call void [TestFramework]TestFramework::MethodCallTest(string,string,string)
ldloc V_13
box valuetype MyStruct263`2<class BaseClass0,class BaseClass0>
ldloc V_13
box valuetype MyStruct263`2<class BaseClass0,class BaseClass0>
ldvirtftn instance string IBase0::Method2<object>()
calli default string(object)
ldstr "MyStruct263::Method2.2027<System.Object>()"
ldstr "IBase0 on type valuetype MyStruct263`2<class BaseClass0,class BaseClass0>"
call void [TestFramework]TestFramework::MethodCallTest(string,string,string)
ldloc V_13
box valuetype MyStruct263`2<class BaseClass0,class BaseClass0>
ldloc V_13
box valuetype MyStruct263`2<class BaseClass0,class BaseClass0>
ldvirtftn instance string IBase0::Method3<object>()
calli default string(object)
ldstr "MyStruct263::Method3.2028<System.Object>()"
ldstr "IBase0 on type valuetype MyStruct263`2<class BaseClass0,class BaseClass0>"
call void [TestFramework]TestFramework::MethodCallTest(string,string,string)
ldloc V_13
box valuetype MyStruct263`2<class BaseClass0,class BaseClass0>
ldloc V_13
box valuetype MyStruct263`2<class BaseClass0,class BaseClass0>
ldvirtftn instance string class IBase1`1<class BaseClass0>::Method4()
calli default string(object)
ldstr "MyStruct263::Method4.MI.2019()"
ldstr "class IBase1`1<class BaseClass0> on type valuetype MyStruct263`2<class BaseClass0,class BaseClass0>"
call void [TestFramework]TestFramework::MethodCallTest(string,string,string)
ldloc V_13
box valuetype MyStruct263`2<class BaseClass0,class BaseClass0>
ldloc V_13
box valuetype MyStruct263`2<class BaseClass0,class BaseClass0>
ldvirtftn instance string class IBase1`1<class BaseClass0>::Method5()
calli default string(object)
ldstr "MyStruct263::Method5.2020()"
ldstr "class IBase1`1<class BaseClass0> on type valuetype MyStruct263`2<class BaseClass0,class BaseClass0>"
call void [TestFramework]TestFramework::MethodCallTest(string,string,string)
ldloc V_13
box valuetype MyStruct263`2<class BaseClass0,class BaseClass0>
ldloc V_13
box valuetype MyStruct263`2<class BaseClass0,class BaseClass0>
ldvirtftn instance string class IBase1`1<class BaseClass0>::Method6<object>()
calli default string(object)
ldstr "MyStruct263::Method6.MI.2022<System.Object>()"
ldstr "class IBase1`1<class BaseClass0> on type valuetype MyStruct263`2<class BaseClass0,class BaseClass0>"
call void [TestFramework]TestFramework::MethodCallTest(string,string,string)
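// Repeat the ldvirtftn/calli checks for MyStruct263`2<class BaseClass0,class BaseClass1>.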
.locals init (valuetype MyStruct263`2<class BaseClass0,class BaseClass1> V_14)
ldloca V_14
initobj valuetype MyStruct263`2<class BaseClass0,class BaseClass1>
ldloc V_14
box valuetype MyStruct263`2<class BaseClass0,class BaseClass1>
ldloc V_14
box valuetype MyStruct263`2<class BaseClass0,class BaseClass1>
ldvirtftn instance string valuetype MyStruct263`2<class BaseClass0,class BaseClass1>::Method4()
calli default string(object)
ldstr "MyStruct263::Method4.2018()"
ldstr "valuetype MyStruct263`2<class BaseClass0,class BaseClass1> on type valuetype MyStruct263`2<class BaseClass0,class BaseClass1>"
call void [TestFramework]TestFramework::MethodCallTest(string,string,string)
ldloc V_14
box valuetype MyStruct263`2<class BaseClass0,class BaseClass1>
ldloc V_14
box valuetype MyStruct263`2<class BaseClass0,class BaseClass1>
ldvirtftn instance string valuetype MyStruct263`2<class BaseClass0,class BaseClass1>::Method5()
calli default string(object)
ldstr "MyStruct263::Method5.2020()"
ldstr "valuetype MyStruct263`2<class BaseClass0,class BaseClass1> on type valuetype MyStruct263`2<class BaseClass0,class BaseClass1>"
call void [TestFramework]TestFramework::MethodCallTest(string,string,string)
ldloc V_14
box valuetype MyStruct263`2<class BaseClass0,class BaseClass1>
ldloc V_14
box valuetype MyStruct263`2<class BaseClass0,class BaseClass1>
ldvirtftn instance string valuetype MyStruct263`2<class BaseClass0,class BaseClass1>::Method6<object>()
calli default string(object)
ldstr "MyStruct263::Method6.2021<System.Object>()"
ldstr "valuetype MyStruct263`2<class BaseClass0,class BaseClass1> on type valuetype MyStruct263`2<class BaseClass0,class BaseClass1>"
call void [TestFramework]TestFramework::MethodCallTest(string,string,string)
ldloc V_14
box valuetype MyStruct263`2<class BaseClass0,class BaseClass1>
ldloc V_14
box valuetype MyStruct263`2<class BaseClass0,class BaseClass1>
ldvirtftn instance string valuetype MyStruct263`2<class BaseClass0,class BaseClass1>::Method0()
calli default string(object)
ldstr "MyStruct263::Method0.2023()"
ldstr "valuetype MyStruct263`2<class BaseClass0,class BaseClass1> on type valuetype MyStruct263`2<class BaseClass0,class BaseClass1>"
call void [TestFramework]TestFramework::MethodCallTest(string,string,string)
ldloc V_14
box valuetype MyStruct263`2<class BaseClass0,class BaseClass1>
ldloc V_14
box valuetype MyStruct263`2<class BaseClass0,class BaseClass1>
ldvirtftn instance string valuetype MyStruct263`2<class BaseClass0,class BaseClass1>::Method1()
calli default string(object)
ldstr "MyStruct263::Method1.2025()"
ldstr "valuetype MyStruct263`2<class BaseClass0,class BaseClass1> on type valuetype MyStruct263`2<class BaseClass0,class BaseClass1>"
call void [TestFramework]TestFramework::MethodCallTest(string,string,string)
ldloc V_14
box valuetype MyStruct263`2<class BaseClass0,class BaseClass1>
ldloc V_14
box valuetype MyStruct263`2<class BaseClass0,class BaseClass1>
ldvirtftn instance string valuetype MyStruct263`2<class BaseClass0,class BaseClass1>::Method2<object>()
calli default string(object)
ldstr "MyStruct263::Method2.2027<System.Object>()"
ldstr "valuetype MyStruct263`2<class BaseClass0,class BaseClass1> on type valuetype MyStruct263`2<class BaseClass0,class BaseClass1>"
call void [TestFramework]TestFramework::MethodCallTest(string,string,string)
ldloc V_14
box valuetype MyStruct263`2<class BaseClass0,class BaseClass1>
ldloc V_14
box valuetype MyStruct263`2<class BaseClass0,class BaseClass1>
ldvirtftn instance string valuetype MyStruct263`2<class BaseClass0,class BaseClass1>::Method3<object>()
calli default string(object)
ldstr "MyStruct263::Method3.2028<System.Object>()"
ldstr "valuetype MyStruct263`2<class BaseClass0,class BaseClass1> on type valuetype MyStruct263`2<class BaseClass0,class BaseClass1>"
call void [TestFramework]TestFramework::MethodCallTest(string,string,string)
ldloc V_14
box valuetype MyStruct263`2<class BaseClass0,class BaseClass1>
ldloc V_14
box valuetype MyStruct263`2<class BaseClass0,class BaseClass1>
ldvirtftn instance string valuetype MyStruct263`2<class BaseClass0,class BaseClass1>::ClassMethod533()
calli default string(object)
ldstr "MyStruct263::ClassMethod533.2029()"
ldstr "valuetype MyStruct263`2<class BaseClass0,class BaseClass1> on type valuetype MyStruct263`2<class BaseClass0,class BaseClass1>"
call void [TestFramework]TestFramework::MethodCallTest(string,string,string)
ldloc V_14 box valuetype MyStruct263`2<class BaseClass0,class BaseClass1> ldnull
ldloc V_14 box valuetype MyStruct263`2<class BaseClass0,class BaseClass1>
ldvirtftn instance bool valuetype MyStruct263`2<class BaseClass0,class BaseClass1>::Equals(object) calli default bool(object,object) pop
ldloc V_14 box valuetype MyStruct263`2<class BaseClass0,class BaseClass1>
ldloc V_14 box valuetype MyStruct263`2<class BaseClass0,class BaseClass1>
ldvirtftn instance int32 valuetype MyStruct263`2<class BaseClass0,class BaseClass1>::GetHashCode() calli default int32(object) pop
ldloc V_14 box valuetype MyStruct263`2<class BaseClass0,class BaseClass1>
ldloc V_14 box valuetype MyStruct263`2<class BaseClass0,class BaseClass1>
ldvirtftn instance string valuetype MyStruct263`2<class BaseClass0,class BaseClass1>::ToString() calli default string(object) pop
ldloc V_14
box valuetype MyStruct263`2<class BaseClass0,class BaseClass1>
ldloc V_14
box valuetype MyStruct263`2<class BaseClass0,class BaseClass1>
ldvirtftn instance string class IBase1`1<class BaseClass1>::Method4()
calli default string(object)
ldstr "MyStruct263::Method4.MI.2019()"
ldstr "class IBase1`1<class BaseClass1> on type valuetype MyStruct263`2<class BaseClass0,class BaseClass1>"
call void [TestFramework]TestFramework::MethodCallTest(string,string,string)
ldloc V_14
box valuetype MyStruct263`2<class BaseClass0,class BaseClass1>
ldloc V_14
box valuetype MyStruct263`2<class BaseClass0,class BaseClass1>
ldvirtftn instance string class IBase1`1<class BaseClass1>::Method5()
calli default string(object)
ldstr "MyStruct263::Method5.2020()"
ldstr "class IBase1`1<class BaseClass1> on type valuetype MyStruct263`2<class BaseClass0,class BaseClass1>"
call void [TestFramework]TestFramework::MethodCallTest(string,string,string)
ldloc V_14
box valuetype MyStruct263`2<class BaseClass0,class BaseClass1>
ldloc V_14
box valuetype MyStruct263`2<class BaseClass0,class BaseClass1>
ldvirtftn instance string class IBase1`1<class BaseClass1>::Method6<object>()
calli default string(object)
ldstr "MyStruct263::Method6.MI.2022<System.Object>()"
ldstr "class IBase1`1<class BaseClass1> on type valuetype MyStruct263`2<class BaseClass0,class BaseClass1>"
call void [TestFramework]TestFramework::MethodCallTest(string,string,string)
ldloc V_14
box valuetype MyStruct263`2<class BaseClass0,class BaseClass1>
ldloc V_14
box valuetype MyStruct263`2<class BaseClass0,class BaseClass1>
ldvirtftn instance string IBase0::Method0()
calli default string(object)
ldstr "MyStruct263::Method0.MI.2024()"
ldstr "IBase0 on type valuetype MyStruct263`2<class BaseClass0,class BaseClass1>"
call void [TestFramework]TestFramework::MethodCallTest(string,string,string)
ldloc V_14
box valuetype MyStruct263`2<class BaseClass0,class BaseClass1>
ldloc V_14
box valuetype MyStruct263`2<class BaseClass0,class BaseClass1>
ldvirtftn instance string IBase0::Method1()
calli default string(object)
ldstr "MyStruct263::Method1.MI.2026()"
ldstr "IBase0 on type valuetype MyStruct263`2<class BaseClass0,class BaseClass1>"
call void [TestFramework]TestFramework::MethodCallTest(string,string,string)
ldloc V_14
box valuetype MyStruct263`2<class BaseClass0,class BaseClass1>
ldloc V_14
box valuetype MyStruct263`2<class BaseClass0,class BaseClass1>
ldvirtftn instance string IBase0::Method2<object>()
calli default string(object)
ldstr "MyStruct263::Method2.2027<System.Object>()"
ldstr "IBase0 on type valuetype MyStruct263`2<class BaseClass0,class BaseClass1>"
call void [TestFramework]TestFramework::MethodCallTest(string,string,string)
ldloc V_14
box valuetype MyStruct263`2<class BaseClass0,class BaseClass1>
ldloc V_14
box valuetype MyStruct263`2<class BaseClass0,class BaseClass1>
ldvirtftn instance string IBase0::Method3<object>()
calli default string(object)
ldstr "MyStruct263::Method3.2028<System.Object>()"
ldstr "IBase0 on type valuetype MyStruct263`2<class BaseClass0,class BaseClass1>"
call void [TestFramework]TestFramework::MethodCallTest(string,string,string)
ldloc V_14
box valuetype MyStruct263`2<class BaseClass0,class BaseClass1>
ldloc V_14
box valuetype MyStruct263`2<class BaseClass0,class BaseClass1>
ldvirtftn instance string class IBase1`1<class BaseClass0>::Method4()
calli default string(object)
ldstr "MyStruct263::Method4.MI.2019()"
ldstr "class IBase1`1<class BaseClass0> on type valuetype MyStruct263`2<class BaseClass0,class BaseClass1>"
call void [TestFramework]TestFramework::MethodCallTest(string,string,string)
ldloc V_14
box valuetype MyStruct263`2<class BaseClass0,class BaseClass1>
ldloc V_14
box valuetype MyStruct263`2<class BaseClass0,class BaseClass1>
ldvirtftn instance string class IBase1`1<class BaseClass0>::Method5()
calli default string(object)
ldstr "MyStruct263::Method5.2020()"
ldstr "class IBase1`1<class BaseClass0> on type valuetype MyStruct263`2<class BaseClass0,class BaseClass1>"
call void [TestFramework]TestFramework::MethodCallTest(string,string,string)
ldloc V_14
box valuetype MyStruct263`2<class BaseClass0,class BaseClass1>
ldloc V_14
box valuetype MyStruct263`2<class BaseClass0,class BaseClass1>
ldvirtftn instance string class IBase1`1<class BaseClass0>::Method6<object>()
calli default string(object)
ldstr "MyStruct263::Method6.MI.2022<System.Object>()"
ldstr "class IBase1`1<class BaseClass0> on type valuetype MyStruct263`2<class BaseClass0,class BaseClass1>"
call void [TestFramework]TestFramework::MethodCallTest(string,string,string)
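// Repeat the ldvirtftn/calli checks for MyStruct263`2<class BaseClass1,class BaseClass0>.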
.locals init (valuetype MyStruct263`2<class BaseClass1,class BaseClass0> V_15)
ldloca V_15
initobj valuetype MyStruct263`2<class BaseClass1,class BaseClass0>
ldloc V_15
box valuetype MyStruct263`2<class BaseClass1,class BaseClass0>
ldloc V_15
box valuetype MyStruct263`2<class BaseClass1,class BaseClass0>
ldvirtftn instance string valuetype MyStruct263`2<class BaseClass1,class BaseClass0>::Method4()
calli default string(object)
ldstr "MyStruct263::Method4.2018()"
ldstr "valuetype MyStruct263`2<class BaseClass1,class BaseClass0> on type valuetype MyStruct263`2<class BaseClass1,class BaseClass0>"
call void [TestFramework]TestFramework::MethodCallTest(string,string,string)
ldloc V_15
box valuetype MyStruct263`2<class BaseClass1,class BaseClass0>
ldloc V_15
box valuetype MyStruct263`2<class BaseClass1,class BaseClass0>
ldvirtftn instance string valuetype MyStruct263`2<class BaseClass1,class BaseClass0>::Method5()
calli default string(object)
ldstr "MyStruct263::Method5.2020()"
ldstr "valuetype MyStruct263`2<class BaseClass1,class BaseClass0> on type valuetype MyStruct263`2<class BaseClass1,class BaseClass0>"
call void [TestFramework]TestFramework::MethodCallTest(string,string,string)
ldloc V_15
box valuetype MyStruct263`2<class BaseClass1,class BaseClass0>
ldloc V_15
box valuetype MyStruct263`2<class BaseClass1,class BaseClass0>
ldvirtftn instance string valuetype MyStruct263`2<class BaseClass1,class BaseClass0>::Method6<object>()
calli default string(object)
ldstr "MyStruct263::Method6.2021<System.Object>()"
ldstr "valuetype MyStruct263`2<class BaseClass1,class BaseClass0> on type valuetype MyStruct263`2<class BaseClass1,class BaseClass0>"
call void [TestFramework]TestFramework::MethodCallTest(string,string,string)
ldloc V_15
box valuetype MyStruct263`2<class BaseClass1,class BaseClass0>
ldloc V_15
box valuetype MyStruct263`2<class BaseClass1,class BaseClass0>
ldvirtftn instance string valuetype MyStruct263`2<class BaseClass1,class BaseClass0>::Method0()
calli default string(object)
ldstr "MyStruct263::Method0.2023()"
ldstr "valuetype MyStruct263`2<class BaseClass1,class BaseClass0> on type valuetype MyStruct263`2<class BaseClass1,class BaseClass0>"
call void [TestFramework]TestFramework::MethodCallTest(string,string,string)
ldloc V_15
box valuetype MyStruct263`2<class BaseClass1,class BaseClass0>
ldloc V_15
box valuetype MyStruct263`2<class BaseClass1,class BaseClass0>
ldvirtftn instance string valuetype MyStruct263`2<class BaseClass1,class BaseClass0>::Method1()
calli default string(object)
ldstr "MyStruct263::Method1.2025()"
ldstr "valuetype MyStruct263`2<class BaseClass1,class BaseClass0> on type valuetype MyStruct263`2<class BaseClass1,class BaseClass0>"
call void [TestFramework]TestFramework::MethodCallTest(string,string,string)
ldloc V_15
box valuetype MyStruct263`2<class BaseClass1,class BaseClass0>
ldloc V_15
box valuetype MyStruct263`2<class BaseClass1,class BaseClass0>
ldvirtftn instance string valuetype MyStruct263`2<class BaseClass1,class BaseClass0>::Method2<object>()
calli default string(object)
ldstr "MyStruct263::Method2.2027<System.Object>()"
ldstr "valuetype MyStruct263`2<class BaseClass1,class BaseClass0> on type valuetype MyStruct263`2<class BaseClass1,class BaseClass0>"
call void [TestFramework]TestFramework::MethodCallTest(string,string,string)
ldloc V_15
box valuetype MyStruct263`2<class BaseClass1,class BaseClass0>
ldloc V_15
box valuetype MyStruct263`2<class BaseClass1,class BaseClass0>
ldvirtftn instance string valuetype MyStruct263`2<class BaseClass1,class BaseClass0>::Method3<object>()
calli default string(object)
ldstr "MyStruct263::Method3.2028<System.Object>()"
ldstr "valuetype MyStruct263`2<class BaseClass1,class BaseClass0> on type valuetype MyStruct263`2<class BaseClass1,class BaseClass0>"
call void [TestFramework]TestFramework::MethodCallTest(string,string,string)
ldloc V_15
box valuetype MyStruct263`2<class BaseClass1,class BaseClass0>
ldloc V_15
box valuetype MyStruct263`2<class BaseClass1,class BaseClass0>
ldvirtftn instance string valuetype MyStruct263`2<class BaseClass1,class BaseClass0>::ClassMethod533()
calli default string(object)
ldstr "MyStruct263::ClassMethod533.2029()"
ldstr "valuetype MyStruct263`2<class BaseClass1,class BaseClass0> on type valuetype MyStruct263`2<class BaseClass1,class BaseClass0>"
call void [TestFramework]TestFramework::MethodCallTest(string,string,string)
ldloc V_15 box valuetype MyStruct263`2<class BaseClass1,class BaseClass0> ldnull
ldloc V_15 box valuetype MyStruct263`2<class BaseClass1,class BaseClass0>
ldvirtftn instance bool valuetype MyStruct263`2<class BaseClass1,class BaseClass0>::Equals(object) calli default bool(object,object) pop
ldloc V_15 box valuetype MyStruct263`2<class BaseClass1,class BaseClass0>
ldloc V_15 box valuetype MyStruct263`2<class BaseClass1,class BaseClass0>
ldvirtftn instance int32 valuetype MyStruct263`2<class BaseClass1,class BaseClass0>::GetHashCode() calli default int32(object) pop
ldloc V_15 box valuetype MyStruct263`2<class BaseClass1,class BaseClass0>
ldloc V_15 box valuetype MyStruct263`2<class BaseClass1,class BaseClass0>
ldvirtftn instance string valuetype MyStruct263`2<class BaseClass1,class BaseClass0>::ToString() calli default string(object) pop
ldloc V_15
box valuetype MyStruct263`2<class BaseClass1,class BaseClass0>
ldloc V_15
box valuetype MyStruct263`2<class BaseClass1,class BaseClass0>
ldvirtftn instance string class IBase1`1<class BaseClass1>::Method4()
calli default string(object)
ldstr "MyStruct263::Method4.MI.2019()"
ldstr "class IBase1`1<class BaseClass1> on type valuetype MyStruct263`2<class BaseClass1,class BaseClass0>"
call void [TestFramework]TestFramework::MethodCallTest(string,string,string)
ldloc V_15
box valuetype MyStruct263`2<class BaseClass1,class BaseClass0>
ldloc V_15
box valuetype MyStruct263`2<class BaseClass1,class BaseClass0>
ldvirtftn instance string class IBase1`1<class BaseClass1>::Method5()
calli default string(object)
ldstr "MyStruct263::Method5.2020()"
ldstr "class IBase1`1<class BaseClass1> on type valuetype MyStruct263`2<class BaseClass1,class BaseClass0>"
call void [TestFramework]TestFramework::MethodCallTest(string,string,string)
ldloc V_15
box valuetype MyStruct263`2<class BaseClass1,class BaseClass0>
ldloc V_15
box valuetype MyStruct263`2<class BaseClass1,class BaseClass0>
ldvirtftn instance string class IBase1`1<class BaseClass1>::Method6<object>()
calli default string(object)
ldstr "MyStruct263::Method6.MI.2022<System.Object>()"
ldstr "class IBase1`1<class BaseClass1> on type valuetype MyStruct263`2<class BaseClass1,class BaseClass0>"
call void [TestFramework]TestFramework::MethodCallTest(string,string,string)
ldloc V_15
box valuetype MyStruct263`2<class BaseClass1,class BaseClass0>
ldloc V_15
box valuetype MyStruct263`2<class BaseClass1,class BaseClass0>
ldvirtftn instance string IBase0::Method0()
calli default string(object)
ldstr "MyStruct263::Method0.MI.2024()"
ldstr "IBase0 on type valuetype MyStruct263`2<class BaseClass1,class BaseClass0>"
call void [TestFramework]TestFramework::MethodCallTest(string,string,string)
ldloc V_15
box valuetype MyStruct263`2<class BaseClass1,class BaseClass0>
ldloc V_15
box valuetype MyStruct263`2<class BaseClass1,class BaseClass0>
ldvirtftn instance string IBase0::Method1()
calli default string(object)
ldstr "MyStruct263::Method1.MI.2026()"
ldstr "IBase0 on type valuetype MyStruct263`2<class BaseClass1,class BaseClass0>"
call void [TestFramework]TestFramework::MethodCallTest(string,string,string)
ldloc V_15
box valuetype MyStruct263`2<class BaseClass1,class BaseClass0>
ldloc V_15
box valuetype MyStruct263`2<class BaseClass1,class BaseClass0>
ldvirtftn instance string IBase0::Method2<object>()
calli default string(object)
ldstr "MyStruct263::Method2.2027<System.Object>()"
ldstr "IBase0 on type valuetype MyStruct263`2<class BaseClass1,class BaseClass0>"
call void [TestFramework]TestFramework::MethodCallTest(string,string,string)
ldloc V_15
box valuetype MyStruct263`2<class BaseClass1,class BaseClass0>
ldloc V_15
box valuetype MyStruct263`2<class BaseClass1,class BaseClass0>
ldvirtftn instance string IBase0::Method3<object>()
calli default string(object)
ldstr "MyStruct263::Method3.2028<System.Object>()"
ldstr "IBase0 on type valuetype MyStruct263`2<class BaseClass1,class BaseClass0>"
call void [TestFramework]TestFramework::MethodCallTest(string,string,string)
ldloc V_15
box valuetype MyStruct263`2<class BaseClass1,class BaseClass0>
ldloc V_15
box valuetype MyStruct263`2<class BaseClass1,class BaseClass0>
ldvirtftn instance string class IBase1`1<class BaseClass0>::Method4()
calli default string(object)
ldstr "MyStruct263::Method4.MI.2019()"
ldstr "class IBase1`1<class BaseClass0> on type valuetype MyStruct263`2<class BaseClass1,class BaseClass0>"
call void [TestFramework]TestFramework::MethodCallTest(string,string,string)
ldloc V_15
box valuetype MyStruct263`2<class BaseClass1,class BaseClass0>
ldloc V_15
box valuetype MyStruct263`2<class BaseClass1,class BaseClass0>
ldvirtftn instance string class IBase1`1<class BaseClass0>::Method5()
calli default string(object)
ldstr "MyStruct263::Method5.2020()"
ldstr "class IBase1`1<class BaseClass0> on type valuetype MyStruct263`2<class BaseClass1,class BaseClass0>"
call void [TestFramework]TestFramework::MethodCallTest(string,string,string)
ldloc V_15
box valuetype MyStruct263`2<class BaseClass1,class BaseClass0>
ldloc V_15
box valuetype MyStruct263`2<class BaseClass1,class BaseClass0>
ldvirtftn instance string class IBase1`1<class BaseClass0>::Method6<object>()
calli default string(object)
ldstr "MyStruct263::Method6.MI.2022<System.Object>()"
ldstr "class IBase1`1<class BaseClass0> on type valuetype MyStruct263`2<class BaseClass1,class BaseClass0>"
call void [TestFramework]TestFramework::MethodCallTest(string,string,string)
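// Repeat the ldvirtftn/calli checks for MyStruct263`2<class BaseClass1,class BaseClass1>.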
.locals init (valuetype MyStruct263`2<class BaseClass1,class BaseClass1> V_16)
ldloca V_16
initobj valuetype MyStruct263`2<class BaseClass1,class BaseClass1>
ldloc V_16
box valuetype MyStruct263`2<class BaseClass1,class BaseClass1>
ldloc V_16
box valuetype MyStruct263`2<class BaseClass1,class BaseClass1>
ldvirtftn instance string valuetype MyStruct263`2<class BaseClass1,class BaseClass1>::Method4()
calli default string(object)
ldstr "MyStruct263::Method4.2018()"
ldstr "valuetype MyStruct263`2<class BaseClass1,class BaseClass1> on type valuetype MyStruct263`2<class BaseClass1,class BaseClass1>"
call void [TestFramework]TestFramework::MethodCallTest(string,string,string)
ldloc V_16
box valuetype MyStruct263`2<class BaseClass1,class BaseClass1>
ldloc V_16
box valuetype MyStruct263`2<class BaseClass1,class BaseClass1>
ldvirtftn instance string valuetype MyStruct263`2<class BaseClass1,class BaseClass1>::Method5()
calli default string(object)
ldstr "MyStruct263::Method5.2020()"
ldstr "valuetype MyStruct263`2<class BaseClass1,class BaseClass1> on type valuetype MyStruct263`2<class BaseClass1,class BaseClass1>"
call void [TestFramework]TestFramework::MethodCallTest(string,string,string)
ldloc V_16
box valuetype MyStruct263`2<class BaseClass1,class BaseClass1>
ldloc V_16
box valuetype MyStruct263`2<class BaseClass1,class BaseClass1>
ldvirtftn instance string valuetype MyStruct263`2<class BaseClass1,class BaseClass1>::Method6<object>()
calli default string(object)
ldstr "MyStruct263::Method6.2021<System.Object>()"
ldstr "valuetype MyStruct263`2<class BaseClass1,class BaseClass1> on type valuetype MyStruct263`2<class BaseClass1,class BaseClass1>"
call void [TestFramework]TestFramework::MethodCallTest(string,string,string)
ldloc V_16
box valuetype MyStruct263`2<class BaseClass1,class BaseClass1>
ldloc V_16
box valuetype MyStruct263`2<class BaseClass1,class BaseClass1>
ldvirtftn instance string valuetype MyStruct263`2<class BaseClass1,class BaseClass1>::Method0()
calli default string(object)
ldstr "MyStruct263::Method0.2023()"
ldstr "valuetype MyStruct263`2<class BaseClass1,class BaseClass1> on type valuetype MyStruct263`2<class BaseClass1,class BaseClass1>"
call void [TestFramework]TestFramework::MethodCallTest(string,string,string)
ldloc V_16
box valuetype MyStruct263`2<class BaseClass1,class BaseClass1>
ldloc V_16
box valuetype MyStruct263`2<class BaseClass1,class BaseClass1>
ldvirtftn instance string valuetype MyStruct263`2<class BaseClass1,class BaseClass1>::Method1()
calli default string(object)
ldstr "MyStruct263::Method1.2025()"
ldstr "valuetype MyStruct263`2<class BaseClass1,class BaseClass1> on type valuetype MyStruct263`2<class BaseClass1,class BaseClass1>"
call void [TestFramework]TestFramework::MethodCallTest(string,string,string)
ldloc V_16
box valuetype MyStruct263`2<class BaseClass1,class BaseClass1>
ldloc V_16
box valuetype MyStruct263`2<class BaseClass1,class BaseClass1>
ldvirtftn instance string valuetype MyStruct263`2<class BaseClass1,class BaseClass1>::Method2<object>()
calli default string(object)
ldstr "MyStruct263::Method2.2027<System.Object>()"
ldstr "valuetype MyStruct263`2<class BaseClass1,class BaseClass1> on type valuetype MyStruct263`2<class BaseClass1,class BaseClass1>"
call void [TestFramework]TestFramework::MethodCallTest(string,string,string)
ldloc V_16
box valuetype MyStruct263`2<class BaseClass1,class BaseClass1>
ldloc V_16
box valuetype MyStruct263`2<class BaseClass1,class BaseClass1>
ldvirtftn instance string valuetype MyStruct263`2<class BaseClass1,class BaseClass1>::Method3<object>()
calli default string(object)
ldstr "MyStruct263::Method3.2028<System.Object>()"
ldstr "valuetype MyStruct263`2<class BaseClass1,class BaseClass1> on type valuetype MyStruct263`2<class BaseClass1,class BaseClass1>"
call void [TestFramework]TestFramework::MethodCallTest(string,string,string)
ldloc V_16
box valuetype MyStruct263`2<class BaseClass1,class BaseClass1>
ldloc V_16
box valuetype MyStruct263`2<class BaseClass1,class BaseClass1>
ldvirtftn instance string valuetype MyStruct263`2<class BaseClass1,class BaseClass1>::ClassMethod533()
calli default string(object)
ldstr "MyStruct263::ClassMethod533.2029()"
ldstr "valuetype MyStruct263`2<class BaseClass1,class BaseClass1> on type valuetype MyStruct263`2<class BaseClass1,class BaseClass1>"
call void [TestFramework]TestFramework::MethodCallTest(string,string,string)
ldloc V_16 box valuetype MyStruct263`2<class BaseClass1,class BaseClass1> ldnull
ldloc V_16 box valuetype MyStruct263`2<class BaseClass1,class BaseClass1>
ldvirtftn instance bool valuetype MyStruct263`2<class BaseClass1,class BaseClass1>::Equals(object) calli default bool(object,object) pop
ldloc V_16 box valuetype MyStruct263`2<class BaseClass1,class BaseClass1>
ldloc V_16 box valuetype MyStruct263`2<class BaseClass1,class BaseClass1>
ldvirtftn instance int32 valuetype MyStruct263`2<class BaseClass1,class BaseClass1>::GetHashCode() calli default int32(object) pop
ldloc V_16 box valuetype MyStruct263`2<class BaseClass1,class BaseClass1>
ldloc V_16 box valuetype MyStruct263`2<class BaseClass1,class BaseClass1>
ldvirtftn instance string valuetype MyStruct263`2<class BaseClass1,class BaseClass1>::ToString() calli default string(object) pop
ldloc V_16
box valuetype MyStruct263`2<class BaseClass1,class BaseClass1>
ldloc V_16
box valuetype MyStruct263`2<class BaseClass1,class BaseClass1>
ldvirtftn instance string class IBase1`1<class BaseClass1>::Method4()
calli default string(object)
ldstr "MyStruct263::Method4.MI.2019()"
ldstr "class IBase1`1<class BaseClass1> on type valuetype MyStruct263`2<class BaseClass1,class BaseClass1>"
call void [TestFramework]TestFramework::MethodCallTest(string,string,string)
ldloc V_16
box valuetype MyStruct263`2<class BaseClass1,class BaseClass1>
ldloc V_16
box valuetype MyStruct263`2<class BaseClass1,class BaseClass1>
ldvirtftn instance string class IBase1`1<class BaseClass1>::Method5()
calli default string(object)
ldstr "MyStruct263::Method5.2020()"
ldstr "class IBase1`1<class BaseClass1> on type valuetype MyStruct263`2<class BaseClass1,class BaseClass1>"
call void [TestFramework]TestFramework::MethodCallTest(string,string,string)
ldloc V_16
box valuetype MyStruct263`2<class BaseClass1,class BaseClass1>
ldloc V_16
box valuetype MyStruct263`2<class BaseClass1,class BaseClass1>
ldvirtftn instance string class IBase1`1<class BaseClass1>::Method6<object>()
calli default string(object)
ldstr "MyStruct263::Method6.MI.2022<System.Object>()"
ldstr "class IBase1`1<class BaseClass1> on type valuetype MyStruct263`2<class BaseClass1,class BaseClass1>"
call void [TestFramework]TestFramework::MethodCallTest(string,string,string)
ldloc V_16
box valuetype MyStruct263`2<class BaseClass1,class BaseClass1>
ldloc V_16
box valuetype MyStruct263`2<class BaseClass1,class BaseClass1>
ldvirtftn instance string IBase0::Method0()
calli default string(object)
ldstr "MyStruct263::Method0.MI.2024()"
ldstr "IBase0 on type valuetype MyStruct263`2<class BaseClass1,class BaseClass1>"
call void [TestFramework]TestFramework::MethodCallTest(string,string,string)
ldloc V_16
box valuetype MyStruct263`2<class BaseClass1,class BaseClass1>
ldloc V_16
box valuetype MyStruct263`2<class BaseClass1,class BaseClass1>
ldvirtftn instance string IBase0::Method1()
calli default string(object)
ldstr "MyStruct263::Method1.MI.2026()"
ldstr "IBase0 on type valuetype MyStruct263`2<class BaseClass1,class BaseClass1>"
call void [TestFramework]TestFramework::MethodCallTest(string,string,string)
ldloc V_16
box valuetype MyStruct263`2<class BaseClass1,class BaseClass1>
ldloc V_16
box valuetype MyStruct263`2<class BaseClass1,class BaseClass1>
ldvirtftn instance string IBase0::Method2<object>()
calli default string(object)
ldstr "MyStruct263::Method2.2027<System.Object>()"
ldstr "IBase0 on type valuetype MyStruct263`2<class BaseClass1,class BaseClass1>"
call void [TestFramework]TestFramework::MethodCallTest(string,string,string)
ldloc V_16
box valuetype MyStruct263`2<class BaseClass1,class BaseClass1>
ldloc V_16
box valuetype MyStruct263`2<class BaseClass1,class BaseClass1>
ldvirtftn instance string IBase0::Method3<object>()
calli default string(object)
ldstr "MyStruct263::Method3.2028<System.Object>()"
ldstr "IBase0 on type valuetype MyStruct263`2<class BaseClass1,class BaseClass1>"
call void [TestFramework]TestFramework::MethodCallTest(string,string,string)
ldloc V_16
box valuetype MyStruct263`2<class BaseClass1,class BaseClass1>
ldloc V_16
box valuetype MyStruct263`2<class BaseClass1,class BaseClass1>
ldvirtftn instance string class IBase1`1<class BaseClass0>::Method4()
calli default string(object)
ldstr "MyStruct263::Method4.MI.2019()"
ldstr "class IBase1`1<class BaseClass0> on type valuetype MyStruct263`2<class BaseClass1,class BaseClass1>"
call void [TestFramework]TestFramework::MethodCallTest(string,string,string)
ldloc V_16
box valuetype MyStruct263`2<class BaseClass1,class BaseClass1>
ldloc V_16
box valuetype MyStruct263`2<class BaseClass1,class BaseClass1>
ldvirtftn instance string class IBase1`1<class BaseClass0>::Method5()
calli default string(object)
ldstr "MyStruct263::Method5.2020()"
ldstr "class IBase1`1<class BaseClass0> on type valuetype MyStruct263`2<class BaseClass1,class BaseClass1>"
call void [TestFramework]TestFramework::MethodCallTest(string,string,string)
ldloc V_16
box valuetype MyStruct263`2<class BaseClass1,class BaseClass1>
ldloc V_16
box valuetype MyStruct263`2<class BaseClass1,class BaseClass1>
ldvirtftn instance string class IBase1`1<class BaseClass0>::Method6<object>()
calli default string(object)
ldstr "MyStruct263::Method6.MI.2022<System.Object>()"
ldstr "class IBase1`1<class BaseClass0> on type valuetype MyStruct263`2<class BaseClass1,class BaseClass1>"
call void [TestFramework]TestFramework::MethodCallTest(string,string,string)
ldstr "========================================================================\n\n"
call void [mscorlib]System.Console::WriteLine(string)
ret
}
.method public hidebysig static int32 Main() cil managed
{
.custom instance void [xunit.core]Xunit.FactAttribute::.ctor() = (
01 00 00 00
)
.entrypoint
.maxstack 10
call void Generated213::MethodCallingTest()
call void Generated213::ConstrainedCallsTest()
call void Generated213::StructConstrainedInterfaceCallsTest()
call void Generated213::CalliTest()
ldc.i4 100
ret
}
}
| -1 |
dotnet/runtime | 66,245 | JIT: Optimize movzx after setcc | Clear target reg via `xor reg, reg` instead of relying on zero-extend after SETCC which is heavier and slower
```csharp
bool Test(int x) => x == 42;
```
```diff
; Method Test(int):bool:this
G_M55728_IG01: ;; offset=0000H
G_M55728_IG02: ;; offset=0000H
+ 33C0 xor eax, eax
83FA2A cmp edx, 42
0F94C0 sete al
- 0FB6C0 movzx rax, al
G_M55728_IG03: ;; offset=0009H
C3 ret
-; Total bytes of code: 10
+; Total bytes of code: 9
```
Nice diffs:
```
benchmarks.run.Linux.x64.checked.mch:
Total bytes of delta: -5230 (-0.03 % of base)
coreclr_tests.pmi.Linux.x64.checked.mch:
Total bytes of delta: -210860 (-0.16 % of base)
libraries.crossgen2.Linux.x64.checked.mch:
Total bytes of delta: -4420 (-0.06 % of base)
libraries.pmi.Linux.x64.checked.mch:
Total bytes of delta: -25312 (-0.05 % of base)
libraries_tests.pmi.Linux.x64.checked.mch:
Total bytes of delta: -49314 (-0.04 % of base)
``` | EgorBo | 2022-03-05T17:18:25Z | 2022-03-07T23:22:14Z | 440dfe4a7beecd7755767aa247f47af00b119383 | 5635905f134a3329a15112bd4975acef3f661eb2 | JIT: Optimize movzx after setcc. Clear target reg via `xor reg, reg` instead of relying on zero-extend after SETCC which is heavier and slower
```csharp
bool Test(int x) => x == 42;
```
```diff
; Method Test(int):bool:this
G_M55728_IG01: ;; offset=0000H
G_M55728_IG02: ;; offset=0000H
+ 33C0 xor eax, eax
83FA2A cmp edx, 42
0F94C0 sete al
- 0FB6C0 movzx rax, al
G_M55728_IG03: ;; offset=0009H
C3 ret
-; Total bytes of code: 10
+; Total bytes of code: 9
```
Nice diffs:
```
benchmarks.run.Linux.x64.checked.mch:
Total bytes of delta: -5230 (-0.03 % of base)
coreclr_tests.pmi.Linux.x64.checked.mch:
Total bytes of delta: -210860 (-0.16 % of base)
libraries.crossgen2.Linux.x64.checked.mch:
Total bytes of delta: -4420 (-0.06 % of base)
libraries.pmi.Linux.x64.checked.mch:
Total bytes of delta: -25312 (-0.05 % of base)
libraries_tests.pmi.Linux.x64.checked.mch:
Total bytes of delta: -49314 (-0.04 % of base)
``` | ./src/installer/pkg/sfx/bundle/theme/1029/bundle.wxl | <?xml version="1.0" encoding="utf-8"?>
<WixLocalization Culture="en-us" Language="1033" xmlns="http://schemas.microsoft.com/wix/2006/localization">
<String Id="Caption">Instalační program pro [WixBundleName]</String>
<String Id="Title">[BUNDLEMONIKER]</String>
<String Id="Motto">Potřebujete jenom prostředí, textový editor a 10 minut času.
Jste připraveni? Dejme se tedy do toho!</String>
<String Id="ConfirmCancelMessage">Opravdu chcete akci zrušit?</String>
<String Id="ExecuteUpgradeRelatedBundleMessage">Předchozí verze</String>
<String Id="HelpHeader">Nápověda k instalaci</String>
<String Id="HelpText">/install | /repair | /uninstall | /layout [adresář] – Nainstaluje, opraví, odinstaluje nebo
vytvoří úplnou místní kopii svazku v adresáři. Výchozí možností je instalace.
/passive | /quiet – Zobrazí minimální uživatelské rozhraní bez výzev nebo nezobrazí žádné uživatelské rozhraní a
žádné výzvy. Výchozí možností je zobrazení uživatelského rozhraní a všech výzev.
/norestart – potlačí všechny pokusy o restartování. Ve výchozím nastavení uživatelské rozhraní před restartováním zobrazí výzvu.
/log log.txt – Uloží protokol do konkrétního souboru. Ve výchozím nastavení bude soubor protokolu vytvořen v adresáři %TEMP%.</String>
<String Id="HelpCloseButton">&Zavřít</String>
<String Id="InstallAcceptCheckbox">&Souhlasím s licenčními podmínkami</String>
<String Id="InstallOptionsButton">&Možnosti</String>
<String Id="InstallInstallButton">&Nainstalovat</String>
<String Id="InstallCloseButton">&Zavřít</String>
<String Id="OptionsHeader">Možnosti nastavení</String>
<String Id="OptionsLocationLabel">Umístění instalace:</String>
<String Id="OptionsBrowseButton">&Procházet</String>
<String Id="OptionsOkButton">&OK</String>
<String Id="OptionsCancelButton">&Storno</String>
<String Id="ProgressHeader">Průběh instalace</String>
<String Id="ProgressLabel">Zpracování:</String>
<String Id="OverallProgressPackageText">Inicializuje se...</String>
<String Id="ProgressCancelButton">&Storno</String>
<String Id="ModifyHeader">Změnit instalaci</String>
<String Id="ModifyRepairButton">Op&ravit</String>
<String Id="ModifyUninstallButton">O&dinstalovat</String>
<String Id="ModifyCloseButton">&Zavřít</String>
<String Id="SuccessRepairHeader">Oprava se úspěšně dokončila.</String>
<String Id="SuccessUninstallHeader">Odinstalace se úspěšně dokončila.</String>
<String Id="SuccessInstallHeader">Instalace proběhla úspěšně.</String>
<String Id="SuccessHeader">Instalace byla úspěšná.</String>
<String Id="SuccessLaunchButton">&Spustit</String>
<String Id="SuccessRestartText">Před použitím tohoto softwaru musíte restartovat počítač.</String>
<String Id="SuccessRestartButton">&Restartovat</String>
<String Id="SuccessCloseButton">&Zavřít</String>
<String Id="FailureHeader">Instalace se nezdařila.</String>
<String Id="FailureInstallHeader">Instalace se nepovedla.</String>
<String Id="FailureUninstallHeader">Odinstalace se nepovedla.</String>
<String Id="FailureRepairHeader">Oprava se nepovedla.</String>
<String Id="FailureHyperlinkLogText">Instalace se nepovedla kvůli jednomu nebo víc problémům. Opravte tyto problémy a zkuste software znovu nainstalovat. Další informace najdete v <a href="#">souboru protokolu</a>.</String>
<String Id="FailureRestartText">Pro dokončení vrácení změn tohoto softwaru je potřeba restartovat počítač.</String>
<String Id="FailureRestartButton">&Restartovat</String>
<String Id="FailureCloseButton">&Zavřít</String>
<String Id="FailureNotSupportedCurrentOperatingSystem">[PRODUCT_NAME] se tomto operačním systému nepodporuje. Další informace: [LINK_PREREQ_PAGE]</String>
<String Id="FailureNotSupportedX86OperatingSystem">[PRODUCT_NAME] se v operačních systémech pro platformu x86 nepodporuje. Použijte prosím k instalaci odpovídající instalační program pro platformu x86.</String>
<String Id="FilesInUseHeader">Používané soubory</String>
<String Id="FilesInUseLabel">Následující aplikace používají soubory, které je potřeba aktualizovat:</String>
<String Id="FilesInUseCloseRadioButton">Zavřete &aplikace a zkuste je restartovat.</String>
<String Id="FilesInUseDontCloseRadioButton">A&plikace nezavírejte. Bude potřeba provést restart.</String>
<String Id="FilesInUseOkButton">&OK</String>
<String Id="FilesInUseCancelButton">&Zrušit</String>
<String Id="WelcomeHeaderMessage">Modul runtime .NET</String>
<String Id="WelcomeDescription">Modul .NET Runtime se používá ke spouštění aplikací .NET na počítači s Windows. .NET je open source, k dispozici pro více platforem a podporovaný Microsoftem. Doufáme, že se vám bude líbit!</String>
<String Id="LearnMoreTitle">Další informace o .NET</String>
<String Id="SuccessInstallLocation">Do [DOTNETHOME] se nainstalovaly následující položky.</String>
<String Id="SuccessInstallProductName"> - [BUNDLEMONIKER] </String>
<String Id="ResourcesHeader">Prostředky</String>
<String Id="DocumentationLink"><A HREF="https://aka.ms/dotnet-docs">Dokumentace</A></String>
<String Id="RelaseNotesLink"><A HREF="https://aka.ms/20-p2-rel-notes">Zpráva k vydání verze</A></String>
<String Id="TutorialLink"><A HREF="https://aka.ms/dotnet-tutorials">Kurzy</A></String>
<String Id="TelemetryLink"><A HREF="https://aka.ms/dotnet-cli-telemetry">Telemetrie pro platformu .NET</A></String>
<String Id="PrivacyStatementLink"><A HREF="https://aka.ms/dev-privacy">Prohlášení o zásadách ochrany osobních údajů</A></String>
<String Id="EulaLink"><A HREF="https://aka.ms/dotnet-license-windows">Informace o licencování pro .NET</A></String>
<String Id="LicenseAssent">Kliknutím na Nainstalovat vyjadřujete souhlas s následujícími podmínkami.</String>
</WixLocalization>
| <?xml version="1.0" encoding="utf-8"?>
<WixLocalization Culture="en-us" Language="1033" xmlns="http://schemas.microsoft.com/wix/2006/localization">
<String Id="Caption">Instalační program pro [WixBundleName]</String>
<String Id="Title">[BUNDLEMONIKER]</String>
<String Id="Motto">Potřebujete jenom prostředí, textový editor a 10 minut času.
Jste připraveni? Dejme se tedy do toho!</String>
<String Id="ConfirmCancelMessage">Opravdu chcete akci zrušit?</String>
<String Id="ExecuteUpgradeRelatedBundleMessage">Předchozí verze</String>
<String Id="HelpHeader">Nápověda k instalaci</String>
<String Id="HelpText">/install | /repair | /uninstall | /layout [adresář] – Nainstaluje, opraví, odinstaluje nebo
vytvoří úplnou místní kopii svazku v adresáři. Výchozí možností je instalace.
/passive | /quiet – Zobrazí minimální uživatelské rozhraní bez výzev nebo nezobrazí žádné uživatelské rozhraní a
žádné výzvy. Výchozí možností je zobrazení uživatelského rozhraní a všech výzev.
/norestart – potlačí všechny pokusy o restartování. Ve výchozím nastavení uživatelské rozhraní před restartováním zobrazí výzvu.
/log log.txt – Uloží protokol do konkrétního souboru. Ve výchozím nastavení bude soubor protokolu vytvořen v adresáři %TEMP%.</String>
<String Id="HelpCloseButton">&Zavřít</String>
<String Id="InstallAcceptCheckbox">&Souhlasím s licenčními podmínkami</String>
<String Id="InstallOptionsButton">&Možnosti</String>
<String Id="InstallInstallButton">&Nainstalovat</String>
<String Id="InstallCloseButton">&Zavřít</String>
<String Id="OptionsHeader">Možnosti nastavení</String>
<String Id="OptionsLocationLabel">Umístění instalace:</String>
<String Id="OptionsBrowseButton">&Procházet</String>
<String Id="OptionsOkButton">&OK</String>
<String Id="OptionsCancelButton">&Storno</String>
<String Id="ProgressHeader">Průběh instalace</String>
<String Id="ProgressLabel">Zpracování:</String>
<String Id="OverallProgressPackageText">Inicializuje se...</String>
<String Id="ProgressCancelButton">&Storno</String>
<String Id="ModifyHeader">Změnit instalaci</String>
<String Id="ModifyRepairButton">Op&ravit</String>
<String Id="ModifyUninstallButton">O&dinstalovat</String>
<String Id="ModifyCloseButton">&Zavřít</String>
<String Id="SuccessRepairHeader">Oprava se úspěšně dokončila.</String>
<String Id="SuccessUninstallHeader">Odinstalace se úspěšně dokončila.</String>
<String Id="SuccessInstallHeader">Instalace proběhla úspěšně.</String>
<String Id="SuccessHeader">Instalace byla úspěšná.</String>
<String Id="SuccessLaunchButton">&Spustit</String>
<String Id="SuccessRestartText">Před použitím tohoto softwaru musíte restartovat počítač.</String>
<String Id="SuccessRestartButton">&Restartovat</String>
<String Id="SuccessCloseButton">&Zavřít</String>
<String Id="FailureHeader">Instalace se nezdařila.</String>
<String Id="FailureInstallHeader">Instalace se nepovedla.</String>
<String Id="FailureUninstallHeader">Odinstalace se nepovedla.</String>
<String Id="FailureRepairHeader">Oprava se nepovedla.</String>
<String Id="FailureHyperlinkLogText">Instalace se nepovedla kvůli jednomu nebo víc problémům. Opravte tyto problémy a zkuste software znovu nainstalovat. Další informace najdete v <a href="#">souboru protokolu</a>.</String>
<String Id="FailureRestartText">Pro dokončení vrácení změn tohoto softwaru je potřeba restartovat počítač.</String>
<String Id="FailureRestartButton">&Restartovat</String>
<String Id="FailureCloseButton">&Zavřít</String>
<String Id="FailureNotSupportedCurrentOperatingSystem">[PRODUCT_NAME] se tomto operačním systému nepodporuje. Další informace: [LINK_PREREQ_PAGE]</String>
<String Id="FailureNotSupportedX86OperatingSystem">[PRODUCT_NAME] se v operačních systémech pro platformu x86 nepodporuje. Použijte prosím k instalaci odpovídající instalační program pro platformu x86.</String>
<String Id="FilesInUseHeader">Používané soubory</String>
<String Id="FilesInUseLabel">Následující aplikace používají soubory, které je potřeba aktualizovat:</String>
<String Id="FilesInUseCloseRadioButton">Zavřete &aplikace a zkuste je restartovat.</String>
<String Id="FilesInUseDontCloseRadioButton">A&plikace nezavírejte. Bude potřeba provést restart.</String>
<String Id="FilesInUseOkButton">&OK</String>
<String Id="FilesInUseCancelButton">&Zrušit</String>
<String Id="WelcomeHeaderMessage">Modul runtime .NET</String>
<String Id="WelcomeDescription">Modul .NET Runtime se používá ke spouštění aplikací .NET na počítači s Windows. .NET je open source, k dispozici pro více platforem a podporovaný Microsoftem. Doufáme, že se vám bude líbit!</String>
<String Id="LearnMoreTitle">Další informace o .NET</String>
<String Id="SuccessInstallLocation">Do [DOTNETHOME] se nainstalovaly následující položky.</String>
<String Id="SuccessInstallProductName"> - [BUNDLEMONIKER] </String>
<String Id="ResourcesHeader">Prostředky</String>
<String Id="DocumentationLink"><A HREF="https://aka.ms/dotnet-docs">Dokumentace</A></String>
<String Id="RelaseNotesLink"><A HREF="https://aka.ms/20-p2-rel-notes">Zpráva k vydání verze</A></String>
<String Id="TutorialLink"><A HREF="https://aka.ms/dotnet-tutorials">Kurzy</A></String>
<String Id="TelemetryLink"><A HREF="https://aka.ms/dotnet-cli-telemetry">Telemetrie pro platformu .NET</A></String>
<String Id="PrivacyStatementLink"><A HREF="https://aka.ms/dev-privacy">Prohlášení o zásadách ochrany osobních údajů</A></String>
<String Id="EulaLink"><A HREF="https://aka.ms/dotnet-license-windows">Informace o licencování pro .NET</A></String>
<String Id="LicenseAssent">Kliknutím na Nainstalovat vyjadřujete souhlas s následujícími podmínkami.</String>
</WixLocalization>
| -1 |
dotnet/runtime | 66,245 | JIT: Optimize movzx after setcc | Clear target reg via `xor reg, reg` instead of relying on zero-extend after SETCC which is heavier and slower
```csharp
bool Test(int x) => x == 42;
```
```diff
; Method Test(int):bool:this
G_M55728_IG01: ;; offset=0000H
G_M55728_IG02: ;; offset=0000H
+ 33C0 xor eax, eax
83FA2A cmp edx, 42
0F94C0 sete al
- 0FB6C0 movzx rax, al
G_M55728_IG03: ;; offset=0009H
C3 ret
-; Total bytes of code: 10
+; Total bytes of code: 9
```
Nice diffs:
```
benchmarks.run.Linux.x64.checked.mch:
Total bytes of delta: -5230 (-0.03 % of base)
coreclr_tests.pmi.Linux.x64.checked.mch:
Total bytes of delta: -210860 (-0.16 % of base)
libraries.crossgen2.Linux.x64.checked.mch:
Total bytes of delta: -4420 (-0.06 % of base)
libraries.pmi.Linux.x64.checked.mch:
Total bytes of delta: -25312 (-0.05 % of base)
libraries_tests.pmi.Linux.x64.checked.mch:
Total bytes of delta: -49314 (-0.04 % of base)
``` | EgorBo | 2022-03-05T17:18:25Z | 2022-03-07T23:22:14Z | 440dfe4a7beecd7755767aa247f47af00b119383 | 5635905f134a3329a15112bd4975acef3f661eb2 | JIT: Optimize movzx after setcc. Clear target reg via `xor reg, reg` instead of relying on zero-extend after SETCC which is heavier and slower
```csharp
bool Test(int x) => x == 42;
```
```diff
; Method Test(int):bool:this
G_M55728_IG01: ;; offset=0000H
G_M55728_IG02: ;; offset=0000H
+ 33C0 xor eax, eax
83FA2A cmp edx, 42
0F94C0 sete al
- 0FB6C0 movzx rax, al
G_M55728_IG03: ;; offset=0009H
C3 ret
-; Total bytes of code: 10
+; Total bytes of code: 9
```
Nice diffs:
```
benchmarks.run.Linux.x64.checked.mch:
Total bytes of delta: -5230 (-0.03 % of base)
coreclr_tests.pmi.Linux.x64.checked.mch:
Total bytes of delta: -210860 (-0.16 % of base)
libraries.crossgen2.Linux.x64.checked.mch:
Total bytes of delta: -4420 (-0.06 % of base)
libraries.pmi.Linux.x64.checked.mch:
Total bytes of delta: -25312 (-0.05 % of base)
libraries_tests.pmi.Linux.x64.checked.mch:
Total bytes of delta: -49314 (-0.04 % of base)
``` | ./src/coreclr/nativeaot/Runtime/CachedInterfaceDispatch.h | // Licensed to the .NET Foundation under one or more agreements.
// The .NET Foundation licenses this file to you under the MIT license.
// ==--==
//
// Shared (non-architecture specific) portions of a mechanism to perform interface dispatch using an alternate
// mechanism to VSD that does not require runtime generation of code.
//
// ============================================================================
#ifdef FEATURE_CACHED_INTERFACE_DISPATCH
bool InitializeInterfaceDispatch();
void ReclaimUnusedInterfaceDispatchCaches();
// Interface dispatch caches contain an array of these entries. An instance of a cache is paired with a stub
// that implicitly knows how many entries are contained. These entries must be aligned to twice the alignment
// of a pointer due to the synchonization mechanism used to update them at runtime.
struct InterfaceDispatchCacheEntry
{
MethodTable * m_pInstanceType; // Potential type of the object instance being dispatched on
void * m_pTargetCode; // Method to dispatch to if the actual instance type matches the above
};
// The interface dispatch cache itself. As well as the entries we include the cache size (since logic such as
// cache miss processing needs to determine this value in a synchronized manner, so it can't be contained in
// the owning interface dispatch indirection cell) and a list entry used to link the caches in one of a couple
// of lists related to cache reclamation.
#pragma warning(push)
#pragma warning(disable:4200) // nonstandard extension used: zero-sized array in struct/union
struct InterfaceDispatchCell;
struct InterfaceDispatchCache
{
InterfaceDispatchCacheHeader m_cacheHeader;
union
{
InterfaceDispatchCache * m_pNextFree; // next in free list
#ifndef HOST_AMD64
InterfaceDispatchCell * m_pCell; // pointer back to interface dispatch cell - not used for AMD64
#endif
};
uint32_t m_cEntries;
InterfaceDispatchCacheEntry m_rgEntries[];
};
#pragma warning(pop)
#endif // FEATURE_CACHED_INTERFACE_DISPATCH
| // Licensed to the .NET Foundation under one or more agreements.
// The .NET Foundation licenses this file to you under the MIT license.
// ==--==
//
// Shared (non-architecture specific) portions of a mechanism to perform interface dispatch using an alternate
// mechanism to VSD that does not require runtime generation of code.
//
// ============================================================================
#ifdef FEATURE_CACHED_INTERFACE_DISPATCH
bool InitializeInterfaceDispatch();
void ReclaimUnusedInterfaceDispatchCaches();
// Interface dispatch caches contain an array of these entries. An instance of a cache is paired with a stub
// that implicitly knows how many entries are contained. These entries must be aligned to twice the alignment
// of a pointer due to the synchonization mechanism used to update them at runtime.
struct InterfaceDispatchCacheEntry
{
MethodTable * m_pInstanceType; // Potential type of the object instance being dispatched on
void * m_pTargetCode; // Method to dispatch to if the actual instance type matches the above
};
// The interface dispatch cache itself. As well as the entries we include the cache size (since logic such as
// cache miss processing needs to determine this value in a synchronized manner, so it can't be contained in
// the owning interface dispatch indirection cell) and a list entry used to link the caches in one of a couple
// of lists related to cache reclamation.
#pragma warning(push)
#pragma warning(disable:4200) // nonstandard extension used: zero-sized array in struct/union
struct InterfaceDispatchCell;
struct InterfaceDispatchCache
{
InterfaceDispatchCacheHeader m_cacheHeader;
union
{
InterfaceDispatchCache * m_pNextFree; // next in free list
#ifndef HOST_AMD64
InterfaceDispatchCell * m_pCell; // pointer back to interface dispatch cell - not used for AMD64
#endif
};
uint32_t m_cEntries;
InterfaceDispatchCacheEntry m_rgEntries[];
};
#pragma warning(pop)
#endif // FEATURE_CACHED_INTERFACE_DISPATCH
| -1 |
dotnet/runtime | 66,245 | JIT: Optimize movzx after setcc | Clear target reg via `xor reg, reg` instead of relying on zero-extend after SETCC which is heavier and slower
```csharp
bool Test(int x) => x == 42;
```
```diff
; Method Test(int):bool:this
G_M55728_IG01: ;; offset=0000H
G_M55728_IG02: ;; offset=0000H
+ 33C0 xor eax, eax
83FA2A cmp edx, 42
0F94C0 sete al
- 0FB6C0 movzx rax, al
G_M55728_IG03: ;; offset=0009H
C3 ret
-; Total bytes of code: 10
+; Total bytes of code: 9
```
Nice diffs:
```
benchmarks.run.Linux.x64.checked.mch:
Total bytes of delta: -5230 (-0.03 % of base)
coreclr_tests.pmi.Linux.x64.checked.mch:
Total bytes of delta: -210860 (-0.16 % of base)
libraries.crossgen2.Linux.x64.checked.mch:
Total bytes of delta: -4420 (-0.06 % of base)
libraries.pmi.Linux.x64.checked.mch:
Total bytes of delta: -25312 (-0.05 % of base)
libraries_tests.pmi.Linux.x64.checked.mch:
Total bytes of delta: -49314 (-0.04 % of base)
``` | EgorBo | 2022-03-05T17:18:25Z | 2022-03-07T23:22:14Z | 440dfe4a7beecd7755767aa247f47af00b119383 | 5635905f134a3329a15112bd4975acef3f661eb2 | JIT: Optimize movzx after setcc. Clear target reg via `xor reg, reg` instead of relying on zero-extend after SETCC which is heavier and slower
```csharp
bool Test(int x) => x == 42;
```
```diff
; Method Test(int):bool:this
G_M55728_IG01: ;; offset=0000H
G_M55728_IG02: ;; offset=0000H
+ 33C0 xor eax, eax
83FA2A cmp edx, 42
0F94C0 sete al
- 0FB6C0 movzx rax, al
G_M55728_IG03: ;; offset=0009H
C3 ret
-; Total bytes of code: 10
+; Total bytes of code: 9
```
Nice diffs:
```
benchmarks.run.Linux.x64.checked.mch:
Total bytes of delta: -5230 (-0.03 % of base)
coreclr_tests.pmi.Linux.x64.checked.mch:
Total bytes of delta: -210860 (-0.16 % of base)
libraries.crossgen2.Linux.x64.checked.mch:
Total bytes of delta: -4420 (-0.06 % of base)
libraries.pmi.Linux.x64.checked.mch:
Total bytes of delta: -25312 (-0.05 % of base)
libraries_tests.pmi.Linux.x64.checked.mch:
Total bytes of delta: -49314 (-0.04 % of base)
``` | ./src/libraries/System.Runtime.Extensions/src/System.Runtime.Extensions.csproj | <Project Sdk="Microsoft.NET.Sdk">
<PropertyGroup>
<IsPartialFacadeAssembly>true</IsPartialFacadeAssembly>
<TargetFramework>$(NetCoreAppCurrent)</TargetFramework>
<Nullable>enable</Nullable>
</PropertyGroup>
<ItemGroup>
<ProjectReference Include="$(CoreLibProject)" />
<ProjectReference Include="$(LibrariesProjectRoot)System.Private.Uri\src\System.Private.Uri.csproj" />
<ProjectReference Include="$(LibrariesProjectRoot)System.Runtime\src\System.Runtime.csproj" />
</ItemGroup>
</Project>
| <Project Sdk="Microsoft.NET.Sdk">
<PropertyGroup>
<IsPartialFacadeAssembly>true</IsPartialFacadeAssembly>
<TargetFramework>$(NetCoreAppCurrent)</TargetFramework>
<Nullable>enable</Nullable>
</PropertyGroup>
<ItemGroup>
<ProjectReference Include="$(CoreLibProject)" />
<ProjectReference Include="$(LibrariesProjectRoot)System.Private.Uri\src\System.Private.Uri.csproj" />
<ProjectReference Include="$(LibrariesProjectRoot)System.Runtime\src\System.Runtime.csproj" />
</ItemGroup>
</Project>
| -1 |
dotnet/runtime | 66,245 | JIT: Optimize movzx after setcc | Clear target reg via `xor reg, reg` instead of relying on zero-extend after SETCC which is heavier and slower
```csharp
bool Test(int x) => x == 42;
```
```diff
; Method Test(int):bool:this
G_M55728_IG01: ;; offset=0000H
G_M55728_IG02: ;; offset=0000H
+ 33C0 xor eax, eax
83FA2A cmp edx, 42
0F94C0 sete al
- 0FB6C0 movzx rax, al
G_M55728_IG03: ;; offset=0009H
C3 ret
-; Total bytes of code: 10
+; Total bytes of code: 9
```
Nice diffs:
```
benchmarks.run.Linux.x64.checked.mch:
Total bytes of delta: -5230 (-0.03 % of base)
coreclr_tests.pmi.Linux.x64.checked.mch:
Total bytes of delta: -210860 (-0.16 % of base)
libraries.crossgen2.Linux.x64.checked.mch:
Total bytes of delta: -4420 (-0.06 % of base)
libraries.pmi.Linux.x64.checked.mch:
Total bytes of delta: -25312 (-0.05 % of base)
libraries_tests.pmi.Linux.x64.checked.mch:
Total bytes of delta: -49314 (-0.04 % of base)
``` | EgorBo | 2022-03-05T17:18:25Z | 2022-03-07T23:22:14Z | 440dfe4a7beecd7755767aa247f47af00b119383 | 5635905f134a3329a15112bd4975acef3f661eb2 | JIT: Optimize movzx after setcc. Clear target reg via `xor reg, reg` instead of relying on zero-extend after SETCC which is heavier and slower
```csharp
bool Test(int x) => x == 42;
```
```diff
; Method Test(int):bool:this
G_M55728_IG01: ;; offset=0000H
G_M55728_IG02: ;; offset=0000H
+ 33C0 xor eax, eax
83FA2A cmp edx, 42
0F94C0 sete al
- 0FB6C0 movzx rax, al
G_M55728_IG03: ;; offset=0009H
C3 ret
-; Total bytes of code: 10
+; Total bytes of code: 9
```
Nice diffs:
```
benchmarks.run.Linux.x64.checked.mch:
Total bytes of delta: -5230 (-0.03 % of base)
coreclr_tests.pmi.Linux.x64.checked.mch:
Total bytes of delta: -210860 (-0.16 % of base)
libraries.crossgen2.Linux.x64.checked.mch:
Total bytes of delta: -4420 (-0.06 % of base)
libraries.pmi.Linux.x64.checked.mch:
Total bytes of delta: -25312 (-0.05 % of base)
libraries_tests.pmi.Linux.x64.checked.mch:
Total bytes of delta: -49314 (-0.04 % of base)
``` | ./src/libraries/System.Text.Json/tests/Common/TestClasses/TestClasses.SimpleTestClassWithSimpleObject.cs | // Licensed to the .NET Foundation under one or more agreements.
// The .NET Foundation licenses this file to you under the MIT license.
using Xunit;
namespace System.Text.Json.Serialization.Tests
{
public class SimpleTestClassWithSimpleObject : ITestClass
{
public object MyInt16 { get; set; }
public object MyInt32 { get; set; }
public object MyInt64 { get; set; }
public object MyUInt16 { get; set; }
public object MyUInt32 { get; set; }
public object MyUInt64 { get; set; }
public object MyByte { get; set; }
public object MySByte { get; set; }
public object MyChar { get; set; }
public object MyString { get; set; }
public object MyDecimal { get; set; }
public object MyBooleanTrue { get; set; }
public object MyBooleanFalse { get; set; }
public object MySingle { get; set; }
public object MyDouble { get; set; }
public object MyDateTime { get; set; }
public object MyGuid { get; set; }
public object MyEnum { get; set; }
public object MyStruct { get; set; }
public static readonly string s_json =
@"{" +
@"""MyInt16"" : 1," +
@"""MyInt32"" : 2," +
@"""MyInt64"" : 3," +
@"""MyUInt16"" : 4," +
@"""MyUInt32"" : 5," +
@"""MyUInt64"" : 6," +
@"""MyByte"" : 7," +
@"""MySByte"" : 8," +
@"""MyChar"" : ""a""," +
@"""MyString"" : ""Hello""," +
@"""MyBooleanTrue"" : true," +
@"""MyBooleanFalse"" : false," +
@"""MySingle"" : 1.1," +
@"""MyDouble"" : 2.2," +
@"""MyDecimal"" : 3.3," +
@"""MyDateTime"" : ""2019-01-30T12:01:02.0000000Z""," +
@"""MyGuid"" : ""5BB9D872-DA8A-471E-AA70-08E19102683D""," +
@"""MyEnum"" : 2," + // int by default
@"""MyStruct"" : { ""One"" : 1, ""Two"" : 3.14 }" +
@"}";
public static readonly byte[] s_data = Encoding.UTF8.GetBytes(s_json);
private bool _initialized;
public virtual void Initialize()
{
_initialized = true;
MyInt16 = (short)1;
MyInt32 = (int)2;
MyInt64 = (long)3;
MyUInt16 = (ushort)4;
MyUInt32 = (uint)5;
MyUInt64 = (ulong)6;
MyByte = (byte)7;
MySByte = (sbyte)8;
MyChar = 'a';
MyString = "Hello";
MyBooleanTrue = true;
MyBooleanFalse = false;
MySingle = 1.1f;
MyDouble = 2.2d;
MyDecimal = 3.3m;
MyDateTime = new DateTime(2019, 1, 30, 12, 1, 2, DateTimeKind.Utc);
MyGuid = new Guid("5BB9D872-DA8A-471E-AA70-08E19102683D");
MyEnum = SampleEnum.Two;
MyStruct = new SimpleStruct { One = 1, Two = 3.14 };
}
public virtual void Verify()
{
// Shared test logic verifies state after calling Initialize. In the object
// case we don't care if the object is initialized with non JsonElement values,
// they'll still be serialized back in as JsonElement.
if (_initialized)
return;
Assert.IsType<JsonElement>(MyInt16);
Assert.Equal(JsonValueKind.Number, ((JsonElement)MyInt16).ValueKind);
Assert.Equal(1, ((JsonElement)MyInt16).GetInt16());
Assert.IsType<JsonElement>(MyInt32);
Assert.Equal(JsonValueKind.Number, ((JsonElement)MyInt32).ValueKind);
Assert.Equal(2, ((JsonElement)MyInt32).GetInt32());
Assert.IsType<JsonElement>(MyInt64);
Assert.Equal(JsonValueKind.Number, ((JsonElement)MyInt64).ValueKind);
Assert.Equal(3L, ((JsonElement)MyInt64).GetInt64());
Assert.IsType<JsonElement>(MyUInt16);
Assert.Equal(JsonValueKind.Number, ((JsonElement)MyUInt16).ValueKind);
Assert.Equal(4u, ((JsonElement)MyUInt16).GetUInt16());
Assert.IsType<JsonElement>(MyUInt32);
Assert.Equal(JsonValueKind.Number, ((JsonElement)MyUInt32).ValueKind);
Assert.Equal(5u, ((JsonElement)MyUInt32).GetUInt32());
Assert.IsType<JsonElement>(MyUInt64);
Assert.Equal(JsonValueKind.Number, ((JsonElement)MyUInt64).ValueKind);
Assert.Equal(6UL, ((JsonElement)MyUInt64).GetUInt64());
Assert.IsType<JsonElement>(MyByte);
Assert.Equal(JsonValueKind.Number, ((JsonElement)MyByte).ValueKind);
Assert.Equal(7, ((JsonElement)MyByte).GetByte());
Assert.IsType<JsonElement>(MySByte);
Assert.Equal(JsonValueKind.Number, ((JsonElement)MySByte).ValueKind);
Assert.Equal(8, ((JsonElement)MySByte).GetSByte());
Assert.IsType<JsonElement>(MyChar);
Assert.Equal(JsonValueKind.String, ((JsonElement)MyChar).ValueKind);
Assert.Equal("a", ((JsonElement)MyChar).GetString());
Assert.IsType<JsonElement>(MyString);
Assert.Equal(JsonValueKind.String, ((JsonElement)MyString).ValueKind);
Assert.Equal("Hello", ((JsonElement)MyString).GetString());
Assert.IsType<JsonElement>(MyDecimal);
Assert.Equal(JsonValueKind.Number, ((JsonElement)MyDecimal).ValueKind);
Assert.Equal(3.3m, ((JsonElement)MyDecimal).GetDecimal());
Assert.IsType<JsonElement>(MyBooleanFalse);
Assert.Equal(JsonValueKind.False, ((JsonElement)MyBooleanFalse).ValueKind);
Assert.False(((JsonElement)MyBooleanFalse).GetBoolean());
Assert.IsType<JsonElement>(MyBooleanTrue);
Assert.Equal(JsonValueKind.True, ((JsonElement)MyBooleanTrue).ValueKind);
Assert.True(((JsonElement)MyBooleanTrue).GetBoolean());
Assert.IsType<JsonElement>(MySingle);
Assert.Equal(JsonValueKind.Number, ((JsonElement)MySingle).ValueKind);
Assert.Equal(1.1f, ((JsonElement)MySingle).GetSingle());
Assert.IsType<JsonElement>(MyDouble);
Assert.Equal(JsonValueKind.Number, ((JsonElement)MyDouble).ValueKind);
Assert.Equal(2.2d, ((JsonElement)MyDouble).GetDouble());
Assert.IsType<JsonElement>(MyDateTime);
Assert.Equal(JsonValueKind.String, ((JsonElement)MyDateTime).ValueKind);
Assert.Equal(new DateTime(2019, 1, 30, 12, 1, 2, DateTimeKind.Utc), ((JsonElement)MyDateTime).GetDateTime());
Assert.IsType<JsonElement>(MyGuid);
Assert.Equal(JsonValueKind.String, ((JsonElement)MyGuid).ValueKind);
Assert.Equal(new Guid("5BB9D872-DA8A-471E-AA70-08E19102683D"), ((JsonElement)MyGuid).GetGuid());
Assert.IsType<JsonElement>(MyEnum);
Assert.Equal(JsonValueKind.Number, ((JsonElement)MyEnum).ValueKind);
Assert.Equal(SampleEnum.Two, (SampleEnum)((JsonElement)MyEnum).GetUInt32());
Assert.Equal(1, ((JsonElement)MyStruct).GetProperty("One").GetInt32());
Assert.Equal(3.14, ((JsonElement)MyStruct).GetProperty("Two").GetDouble());
}
}
}
| // Licensed to the .NET Foundation under one or more agreements.
// The .NET Foundation licenses this file to you under the MIT license.
using Xunit;
namespace System.Text.Json.Serialization.Tests
{
public class SimpleTestClassWithSimpleObject : ITestClass
{
public object MyInt16 { get; set; }
public object MyInt32 { get; set; }
public object MyInt64 { get; set; }
public object MyUInt16 { get; set; }
public object MyUInt32 { get; set; }
public object MyUInt64 { get; set; }
public object MyByte { get; set; }
public object MySByte { get; set; }
public object MyChar { get; set; }
public object MyString { get; set; }
public object MyDecimal { get; set; }
public object MyBooleanTrue { get; set; }
public object MyBooleanFalse { get; set; }
public object MySingle { get; set; }
public object MyDouble { get; set; }
public object MyDateTime { get; set; }
public object MyGuid { get; set; }
public object MyEnum { get; set; }
public object MyStruct { get; set; }
public static readonly string s_json =
@"{" +
@"""MyInt16"" : 1," +
@"""MyInt32"" : 2," +
@"""MyInt64"" : 3," +
@"""MyUInt16"" : 4," +
@"""MyUInt32"" : 5," +
@"""MyUInt64"" : 6," +
@"""MyByte"" : 7," +
@"""MySByte"" : 8," +
@"""MyChar"" : ""a""," +
@"""MyString"" : ""Hello""," +
@"""MyBooleanTrue"" : true," +
@"""MyBooleanFalse"" : false," +
@"""MySingle"" : 1.1," +
@"""MyDouble"" : 2.2," +
@"""MyDecimal"" : 3.3," +
@"""MyDateTime"" : ""2019-01-30T12:01:02.0000000Z""," +
@"""MyGuid"" : ""5BB9D872-DA8A-471E-AA70-08E19102683D""," +
@"""MyEnum"" : 2," + // int by default
@"""MyStruct"" : { ""One"" : 1, ""Two"" : 3.14 }" +
@"}";
public static readonly byte[] s_data = Encoding.UTF8.GetBytes(s_json);
private bool _initialized;
public virtual void Initialize()
{
_initialized = true;
MyInt16 = (short)1;
MyInt32 = (int)2;
MyInt64 = (long)3;
MyUInt16 = (ushort)4;
MyUInt32 = (uint)5;
MyUInt64 = (ulong)6;
MyByte = (byte)7;
MySByte = (sbyte)8;
MyChar = 'a';
MyString = "Hello";
MyBooleanTrue = true;
MyBooleanFalse = false;
MySingle = 1.1f;
MyDouble = 2.2d;
MyDecimal = 3.3m;
MyDateTime = new DateTime(2019, 1, 30, 12, 1, 2, DateTimeKind.Utc);
MyGuid = new Guid("5BB9D872-DA8A-471E-AA70-08E19102683D");
MyEnum = SampleEnum.Two;
MyStruct = new SimpleStruct { One = 1, Two = 3.14 };
}
public virtual void Verify()
{
// Shared test logic verifies state after calling Initialize. In the object
// case we don't care if the object is initialized with non JsonElement values,
// they'll still be serialized back in as JsonElement.
if (_initialized)
return;
Assert.IsType<JsonElement>(MyInt16);
Assert.Equal(JsonValueKind.Number, ((JsonElement)MyInt16).ValueKind);
Assert.Equal(1, ((JsonElement)MyInt16).GetInt16());
Assert.IsType<JsonElement>(MyInt32);
Assert.Equal(JsonValueKind.Number, ((JsonElement)MyInt32).ValueKind);
Assert.Equal(2, ((JsonElement)MyInt32).GetInt32());
Assert.IsType<JsonElement>(MyInt64);
Assert.Equal(JsonValueKind.Number, ((JsonElement)MyInt64).ValueKind);
Assert.Equal(3L, ((JsonElement)MyInt64).GetInt64());
Assert.IsType<JsonElement>(MyUInt16);
Assert.Equal(JsonValueKind.Number, ((JsonElement)MyUInt16).ValueKind);
Assert.Equal(4u, ((JsonElement)MyUInt16).GetUInt16());
Assert.IsType<JsonElement>(MyUInt32);
Assert.Equal(JsonValueKind.Number, ((JsonElement)MyUInt32).ValueKind);
Assert.Equal(5u, ((JsonElement)MyUInt32).GetUInt32());
Assert.IsType<JsonElement>(MyUInt64);
Assert.Equal(JsonValueKind.Number, ((JsonElement)MyUInt64).ValueKind);
Assert.Equal(6UL, ((JsonElement)MyUInt64).GetUInt64());
Assert.IsType<JsonElement>(MyByte);
Assert.Equal(JsonValueKind.Number, ((JsonElement)MyByte).ValueKind);
Assert.Equal(7, ((JsonElement)MyByte).GetByte());
Assert.IsType<JsonElement>(MySByte);
Assert.Equal(JsonValueKind.Number, ((JsonElement)MySByte).ValueKind);
Assert.Equal(8, ((JsonElement)MySByte).GetSByte());
Assert.IsType<JsonElement>(MyChar);
Assert.Equal(JsonValueKind.String, ((JsonElement)MyChar).ValueKind);
Assert.Equal("a", ((JsonElement)MyChar).GetString());
Assert.IsType<JsonElement>(MyString);
Assert.Equal(JsonValueKind.String, ((JsonElement)MyString).ValueKind);
Assert.Equal("Hello", ((JsonElement)MyString).GetString());
Assert.IsType<JsonElement>(MyDecimal);
Assert.Equal(JsonValueKind.Number, ((JsonElement)MyDecimal).ValueKind);
Assert.Equal(3.3m, ((JsonElement)MyDecimal).GetDecimal());
Assert.IsType<JsonElement>(MyBooleanFalse);
Assert.Equal(JsonValueKind.False, ((JsonElement)MyBooleanFalse).ValueKind);
Assert.False(((JsonElement)MyBooleanFalse).GetBoolean());
Assert.IsType<JsonElement>(MyBooleanTrue);
Assert.Equal(JsonValueKind.True, ((JsonElement)MyBooleanTrue).ValueKind);
Assert.True(((JsonElement)MyBooleanTrue).GetBoolean());
Assert.IsType<JsonElement>(MySingle);
Assert.Equal(JsonValueKind.Number, ((JsonElement)MySingle).ValueKind);
Assert.Equal(1.1f, ((JsonElement)MySingle).GetSingle());
Assert.IsType<JsonElement>(MyDouble);
Assert.Equal(JsonValueKind.Number, ((JsonElement)MyDouble).ValueKind);
Assert.Equal(2.2d, ((JsonElement)MyDouble).GetDouble());
Assert.IsType<JsonElement>(MyDateTime);
Assert.Equal(JsonValueKind.String, ((JsonElement)MyDateTime).ValueKind);
Assert.Equal(new DateTime(2019, 1, 30, 12, 1, 2, DateTimeKind.Utc), ((JsonElement)MyDateTime).GetDateTime());
Assert.IsType<JsonElement>(MyGuid);
Assert.Equal(JsonValueKind.String, ((JsonElement)MyGuid).ValueKind);
Assert.Equal(new Guid("5BB9D872-DA8A-471E-AA70-08E19102683D"), ((JsonElement)MyGuid).GetGuid());
Assert.IsType<JsonElement>(MyEnum);
Assert.Equal(JsonValueKind.Number, ((JsonElement)MyEnum).ValueKind);
Assert.Equal(SampleEnum.Two, (SampleEnum)((JsonElement)MyEnum).GetUInt32());
Assert.Equal(1, ((JsonElement)MyStruct).GetProperty("One").GetInt32());
Assert.Equal(3.14, ((JsonElement)MyStruct).GetProperty("Two").GetDouble());
}
}
}
| -1 |
dotnet/runtime | 66,245 | JIT: Optimize movzx after setcc | Clear target reg via `xor reg, reg` instead of relying on zero-extend after SETCC which is heavier and slower
```csharp
bool Test(int x) => x == 42;
```
```diff
; Method Test(int):bool:this
G_M55728_IG01: ;; offset=0000H
G_M55728_IG02: ;; offset=0000H
+ 33C0 xor eax, eax
83FA2A cmp edx, 42
0F94C0 sete al
- 0FB6C0 movzx rax, al
G_M55728_IG03: ;; offset=0009H
C3 ret
-; Total bytes of code: 10
+; Total bytes of code: 9
```
Nice diffs:
```
benchmarks.run.Linux.x64.checked.mch:
Total bytes of delta: -5230 (-0.03 % of base)
coreclr_tests.pmi.Linux.x64.checked.mch:
Total bytes of delta: -210860 (-0.16 % of base)
libraries.crossgen2.Linux.x64.checked.mch:
Total bytes of delta: -4420 (-0.06 % of base)
libraries.pmi.Linux.x64.checked.mch:
Total bytes of delta: -25312 (-0.05 % of base)
libraries_tests.pmi.Linux.x64.checked.mch:
Total bytes of delta: -49314 (-0.04 % of base)
``` | EgorBo | 2022-03-05T17:18:25Z | 2022-03-07T23:22:14Z | 440dfe4a7beecd7755767aa247f47af00b119383 | 5635905f134a3329a15112bd4975acef3f661eb2 | JIT: Optimize movzx after setcc. Clear target reg via `xor reg, reg` instead of relying on zero-extend after SETCC which is heavier and slower
```csharp
bool Test(int x) => x == 42;
```
```diff
; Method Test(int):bool:this
G_M55728_IG01: ;; offset=0000H
G_M55728_IG02: ;; offset=0000H
+ 33C0 xor eax, eax
83FA2A cmp edx, 42
0F94C0 sete al
- 0FB6C0 movzx rax, al
G_M55728_IG03: ;; offset=0009H
C3 ret
-; Total bytes of code: 10
+; Total bytes of code: 9
```
Nice diffs:
```
benchmarks.run.Linux.x64.checked.mch:
Total bytes of delta: -5230 (-0.03 % of base)
coreclr_tests.pmi.Linux.x64.checked.mch:
Total bytes of delta: -210860 (-0.16 % of base)
libraries.crossgen2.Linux.x64.checked.mch:
Total bytes of delta: -4420 (-0.06 % of base)
libraries.pmi.Linux.x64.checked.mch:
Total bytes of delta: -25312 (-0.05 % of base)
libraries_tests.pmi.Linux.x64.checked.mch:
Total bytes of delta: -49314 (-0.04 % of base)
``` | ./src/libraries/System.Linq/tests/OrderByDescendingTests.cs | // Licensed to the .NET Foundation under one or more agreements.
// The .NET Foundation licenses this file to you under the MIT license.
using System;
using System.Collections.Generic;
using Xunit;
namespace System.Linq.Tests
{
public class OrderByDescendingTests : EnumerableTests
{
[Fact]
public void SameResultsRepeatCallsIntQuery()
{
var q = from x1 in new int[] { 1, 6, 0, -1, 3 }
from x2 in new int[] { 55, 49, 9, -100, 24, 25 }
select new { a1 = x1, a2 = x2 };
Assert.Equal(q.OrderByDescending(e => e.a1), q.OrderByDescending(e => e.a1));
}
[Fact]
public void SameResultsRepeatCallsStringQuery()
{
var q = from x1 in new[] { 55, 49, 9, -100, 24, 25, -1, 0 }
from x2 in new[] { "!@#$%^", "C", "AAA", "", null, "Calling Twice", "SoS", string.Empty }
where !string.IsNullOrEmpty(x2)
select new { a1 = x1, a2 = x2 };
Assert.Equal(q.OrderByDescending(e => e.a1).ThenBy(f => f.a2), q.OrderByDescending(e => e.a1).ThenBy(f => f.a2));
}
[Fact]
public void SourceEmpty()
{
int[] source = { };
Assert.Empty(source.OrderByDescending(e => e));
}
[Fact]
public void KeySelectorReturnsNull()
{
int?[] source = { null, null, null };
int?[] expected = { null, null, null };
Assert.Equal(expected, source.OrderByDescending(e => e));
}
[Fact]
public void ElementsAllSameKey()
{
int?[] source = { 9, 9, 9, 9, 9, 9 };
int?[] expected = { 9, 9, 9, 9, 9, 9 };
Assert.Equal(expected, source.OrderByDescending(e => e));
}
[Fact]
public void KeySelectorCalled()
{
var source = new[]
{
new { Name = "Alpha", Score = 90 },
new { Name = "Robert", Score = 45 },
new { Name = "Prakash", Score = 99 },
new { Name = "Bob", Score = 0 }
};
var expected = new[]
{
new { Name = "Robert", Score = 45 },
new { Name = "Prakash", Score = 99 },
new { Name = "Bob", Score = 0 },
new { Name = "Alpha", Score = 90 }
};
Assert.Equal(expected, source.OrderByDescending(e => e.Name, null));
}
[Fact]
public void FirstAndLastAreDuplicatesCustomComparer()
{
string[] source = { "Prakash", "Alpha", "DAN", "dan", "Prakash" };
string[] expected = { "Prakash", "Prakash", "DAN", "dan", "Alpha" };
Assert.Equal(expected, source.OrderByDescending(e => e, StringComparer.OrdinalIgnoreCase));
}
[Fact]
public void RunOnce()
{
string[] source = { "Prakash", "Alpha", "DAN", "dan", "Prakash" };
string[] expected = { "Prakash", "Prakash", "DAN", "dan", "Alpha" };
Assert.Equal(expected, source.RunOnce().OrderByDescending(e => e, StringComparer.OrdinalIgnoreCase));
}
[Fact]
public void FirstAndLastAreDuplicatesNullPassedAsComparer()
{
int[] source = { 5, 1, 3, 2, 5 };
int[] expected = { 5, 5, 3, 2, 1 };
Assert.Equal(expected, source.OrderByDescending(e => e, null));
}
[Fact]
public void SourceReverseOfResultNullPassedAsComparer()
{
int[] source = { -75, -50, 0, 5, 9, 30, 100 };
int[] expected = { 100, 30, 9, 5, 0, -50, -75 };
Assert.Equal(expected, source.OrderByDescending(e => e, null));
}
[Fact]
public void SameKeysVerifySortStable()
{
var source = new[]
{
new { Name = "Alpha", Score = 90 },
new { Name = "Robert", Score = 45 },
new { Name = "Prakash", Score = 99 },
new { Name = "Bob", Score = 90 },
new { Name = "Thomas", Score = 45 },
new { Name = "Tim", Score = 45 },
new { Name = "Mark", Score = 45 },
};
var expected = new[]
{
new { Name = "Prakash", Score = 99 },
new { Name = "Alpha", Score = 90 },
new { Name = "Bob", Score = 90 },
new { Name = "Robert", Score = 45 },
new { Name = "Thomas", Score = 45 },
new { Name = "Tim", Score = 45 },
new { Name = "Mark", Score = 45 },
};
Assert.Equal(expected, source.OrderByDescending(e => e.Score));
}
private class ExtremeComparer : IComparer<int>
{
public int Compare(int x, int y)
{
if (x == y)
return 0;
if (x < y)
return int.MinValue;
return int.MaxValue;
}
}
[Fact]
public void OrderByExtremeComparer()
{
int[] outOfOrder = new[] { 7, 1, 0, 9, 3, 5, 4, 2, 8, 6 };
// The .NET Framework has a bug where the input is incorrectly ordered if the comparer
// returns int.MaxValue or int.MinValue. See https://github.com/dotnet/corefx/pull/2240.
IEnumerable<int> ordered = outOfOrder.OrderByDescending(i => i, new ExtremeComparer()).ToArray();
Assert.Equal(Enumerable.Range(0, 10).Reverse(), ordered);
}
[Fact]
public void NullSource()
{
IEnumerable<int> source = null;
AssertExtensions.Throws<ArgumentNullException>("source", () => source.OrderByDescending(i => i));
}
[Fact]
public void NullKeySelector()
{
Func<DateTime, int> keySelector = null;
AssertExtensions.Throws<ArgumentNullException>("keySelector", () => Enumerable.Empty<DateTime>().OrderByDescending(keySelector));
}
[Fact]
public void SortsLargeAscendingEnumerableCorrectly()
{
const int Items = 1_000_000;
IEnumerable<int> expected = NumberRangeGuaranteedNotCollectionType(0, Items);
IEnumerable<int> unordered = expected.Select(i => i);
IOrderedEnumerable<int> ordered = unordered.OrderByDescending(i => -i);
Assert.Equal(expected, ordered);
}
[Fact]
public void SortsLargeDescendingEnumerableCorrectly()
{
const int Items = 1_000_000;
IEnumerable<int> expected = NumberRangeGuaranteedNotCollectionType(0, Items);
IEnumerable<int> unordered = expected.Select(i => Items - i - 1);
IOrderedEnumerable<int> ordered = unordered.OrderByDescending(i => -i);
Assert.Equal(expected, ordered);
}
[Theory]
[InlineData(0)]
[InlineData(1)]
[InlineData(2)]
[InlineData(3)]
[InlineData(8)]
[InlineData(16)]
[InlineData(1024)]
[InlineData(4096)]
[InlineData(1_000_000)]
public void SortsRandomizedEnumerableCorrectly(int items)
{
var r = new Random(42);
int[] randomized = Enumerable.Range(0, items).Select(i => r.Next()).ToArray();
int[] ordered = ForceNotCollection(randomized).OrderByDescending(i => -i).ToArray();
Array.Sort(randomized, (a, b) => a - b);
Assert.Equal(randomized, ordered);
}
}
}
| // Licensed to the .NET Foundation under one or more agreements.
// The .NET Foundation licenses this file to you under the MIT license.
using System;
using System.Collections.Generic;
using Xunit;
namespace System.Linq.Tests
{
public class OrderByDescendingTests : EnumerableTests
{
[Fact]
public void SameResultsRepeatCallsIntQuery()
{
var q = from x1 in new int[] { 1, 6, 0, -1, 3 }
from x2 in new int[] { 55, 49, 9, -100, 24, 25 }
select new { a1 = x1, a2 = x2 };
Assert.Equal(q.OrderByDescending(e => e.a1), q.OrderByDescending(e => e.a1));
}
[Fact]
public void SameResultsRepeatCallsStringQuery()
{
var q = from x1 in new[] { 55, 49, 9, -100, 24, 25, -1, 0 }
from x2 in new[] { "!@#$%^", "C", "AAA", "", null, "Calling Twice", "SoS", string.Empty }
where !string.IsNullOrEmpty(x2)
select new { a1 = x1, a2 = x2 };
Assert.Equal(q.OrderByDescending(e => e.a1).ThenBy(f => f.a2), q.OrderByDescending(e => e.a1).ThenBy(f => f.a2));
}
[Fact]
public void SourceEmpty()
{
int[] source = { };
Assert.Empty(source.OrderByDescending(e => e));
}
[Fact]
public void KeySelectorReturnsNull()
{
int?[] source = { null, null, null };
int?[] expected = { null, null, null };
Assert.Equal(expected, source.OrderByDescending(e => e));
}
[Fact]
public void ElementsAllSameKey()
{
int?[] source = { 9, 9, 9, 9, 9, 9 };
int?[] expected = { 9, 9, 9, 9, 9, 9 };
Assert.Equal(expected, source.OrderByDescending(e => e));
}
[Fact]
public void KeySelectorCalled()
{
var source = new[]
{
new { Name = "Alpha", Score = 90 },
new { Name = "Robert", Score = 45 },
new { Name = "Prakash", Score = 99 },
new { Name = "Bob", Score = 0 }
};
var expected = new[]
{
new { Name = "Robert", Score = 45 },
new { Name = "Prakash", Score = 99 },
new { Name = "Bob", Score = 0 },
new { Name = "Alpha", Score = 90 }
};
Assert.Equal(expected, source.OrderByDescending(e => e.Name, null));
}
[Fact]
public void FirstAndLastAreDuplicatesCustomComparer()
{
string[] source = { "Prakash", "Alpha", "DAN", "dan", "Prakash" };
string[] expected = { "Prakash", "Prakash", "DAN", "dan", "Alpha" };
Assert.Equal(expected, source.OrderByDescending(e => e, StringComparer.OrdinalIgnoreCase));
}
[Fact]
public void RunOnce()
{
string[] source = { "Prakash", "Alpha", "DAN", "dan", "Prakash" };
string[] expected = { "Prakash", "Prakash", "DAN", "dan", "Alpha" };
Assert.Equal(expected, source.RunOnce().OrderByDescending(e => e, StringComparer.OrdinalIgnoreCase));
}
[Fact]
public void FirstAndLastAreDuplicatesNullPassedAsComparer()
{
int[] source = { 5, 1, 3, 2, 5 };
int[] expected = { 5, 5, 3, 2, 1 };
Assert.Equal(expected, source.OrderByDescending(e => e, null));
}
[Fact]
public void SourceReverseOfResultNullPassedAsComparer()
{
int[] source = { -75, -50, 0, 5, 9, 30, 100 };
int[] expected = { 100, 30, 9, 5, 0, -50, -75 };
Assert.Equal(expected, source.OrderByDescending(e => e, null));
}
[Fact]
public void SameKeysVerifySortStable()
{
var source = new[]
{
new { Name = "Alpha", Score = 90 },
new { Name = "Robert", Score = 45 },
new { Name = "Prakash", Score = 99 },
new { Name = "Bob", Score = 90 },
new { Name = "Thomas", Score = 45 },
new { Name = "Tim", Score = 45 },
new { Name = "Mark", Score = 45 },
};
var expected = new[]
{
new { Name = "Prakash", Score = 99 },
new { Name = "Alpha", Score = 90 },
new { Name = "Bob", Score = 90 },
new { Name = "Robert", Score = 45 },
new { Name = "Thomas", Score = 45 },
new { Name = "Tim", Score = 45 },
new { Name = "Mark", Score = 45 },
};
Assert.Equal(expected, source.OrderByDescending(e => e.Score));
}
private class ExtremeComparer : IComparer<int>
{
public int Compare(int x, int y)
{
if (x == y)
return 0;
if (x < y)
return int.MinValue;
return int.MaxValue;
}
}
[Fact]
public void OrderByExtremeComparer()
{
int[] outOfOrder = new[] { 7, 1, 0, 9, 3, 5, 4, 2, 8, 6 };
// The .NET Framework has a bug where the input is incorrectly ordered if the comparer
// returns int.MaxValue or int.MinValue. See https://github.com/dotnet/corefx/pull/2240.
IEnumerable<int> ordered = outOfOrder.OrderByDescending(i => i, new ExtremeComparer()).ToArray();
Assert.Equal(Enumerable.Range(0, 10).Reverse(), ordered);
}
[Fact]
public void NullSource()
{
IEnumerable<int> source = null;
AssertExtensions.Throws<ArgumentNullException>("source", () => source.OrderByDescending(i => i));
}
[Fact]
public void NullKeySelector()
{
Func<DateTime, int> keySelector = null;
AssertExtensions.Throws<ArgumentNullException>("keySelector", () => Enumerable.Empty<DateTime>().OrderByDescending(keySelector));
}
[Fact]
public void SortsLargeAscendingEnumerableCorrectly()
{
const int Items = 1_000_000;
IEnumerable<int> expected = NumberRangeGuaranteedNotCollectionType(0, Items);
IEnumerable<int> unordered = expected.Select(i => i);
IOrderedEnumerable<int> ordered = unordered.OrderByDescending(i => -i);
Assert.Equal(expected, ordered);
}
[Fact]
public void SortsLargeDescendingEnumerableCorrectly()
{
const int Items = 1_000_000;
IEnumerable<int> expected = NumberRangeGuaranteedNotCollectionType(0, Items);
IEnumerable<int> unordered = expected.Select(i => Items - i - 1);
IOrderedEnumerable<int> ordered = unordered.OrderByDescending(i => -i);
Assert.Equal(expected, ordered);
}
[Theory]
[InlineData(0)]
[InlineData(1)]
[InlineData(2)]
[InlineData(3)]
[InlineData(8)]
[InlineData(16)]
[InlineData(1024)]
[InlineData(4096)]
[InlineData(1_000_000)]
public void SortsRandomizedEnumerableCorrectly(int items)
{
var r = new Random(42);
int[] randomized = Enumerable.Range(0, items).Select(i => r.Next()).ToArray();
int[] ordered = ForceNotCollection(randomized).OrderByDescending(i => -i).ToArray();
Array.Sort(randomized, (a, b) => a - b);
Assert.Equal(randomized, ordered);
}
}
}
| -1 |
dotnet/runtime | 66,245 | JIT: Optimize movzx after setcc | Clear target reg via `xor reg, reg` instead of relying on zero-extend after SETCC which is heavier and slower
```csharp
bool Test(int x) => x == 42;
```
```diff
; Method Test(int):bool:this
G_M55728_IG01: ;; offset=0000H
G_M55728_IG02: ;; offset=0000H
+ 33C0 xor eax, eax
83FA2A cmp edx, 42
0F94C0 sete al
- 0FB6C0 movzx rax, al
G_M55728_IG03: ;; offset=0009H
C3 ret
-; Total bytes of code: 10
+; Total bytes of code: 9
```
Nice diffs:
```
benchmarks.run.Linux.x64.checked.mch:
Total bytes of delta: -5230 (-0.03 % of base)
coreclr_tests.pmi.Linux.x64.checked.mch:
Total bytes of delta: -210860 (-0.16 % of base)
libraries.crossgen2.Linux.x64.checked.mch:
Total bytes of delta: -4420 (-0.06 % of base)
libraries.pmi.Linux.x64.checked.mch:
Total bytes of delta: -25312 (-0.05 % of base)
libraries_tests.pmi.Linux.x64.checked.mch:
Total bytes of delta: -49314 (-0.04 % of base)
``` | EgorBo | 2022-03-05T17:18:25Z | 2022-03-07T23:22:14Z | 440dfe4a7beecd7755767aa247f47af00b119383 | 5635905f134a3329a15112bd4975acef3f661eb2 | JIT: Optimize movzx after setcc. Clear target reg via `xor reg, reg` instead of relying on zero-extend after SETCC which is heavier and slower
```csharp
bool Test(int x) => x == 42;
```
```diff
; Method Test(int):bool:this
G_M55728_IG01: ;; offset=0000H
G_M55728_IG02: ;; offset=0000H
+ 33C0 xor eax, eax
83FA2A cmp edx, 42
0F94C0 sete al
- 0FB6C0 movzx rax, al
G_M55728_IG03: ;; offset=0009H
C3 ret
-; Total bytes of code: 10
+; Total bytes of code: 9
```
Nice diffs:
```
benchmarks.run.Linux.x64.checked.mch:
Total bytes of delta: -5230 (-0.03 % of base)
coreclr_tests.pmi.Linux.x64.checked.mch:
Total bytes of delta: -210860 (-0.16 % of base)
libraries.crossgen2.Linux.x64.checked.mch:
Total bytes of delta: -4420 (-0.06 % of base)
libraries.pmi.Linux.x64.checked.mch:
Total bytes of delta: -25312 (-0.05 % of base)
libraries_tests.pmi.Linux.x64.checked.mch:
Total bytes of delta: -49314 (-0.04 % of base)
``` | ./src/tests/JIT/Methodical/MDArray/DataTypes/decimal_cs_d.csproj | <Project Sdk="Microsoft.NET.Sdk">
<PropertyGroup>
<OutputType>Exe</OutputType>
<CLRTestPriority>1</CLRTestPriority>
</PropertyGroup>
<PropertyGroup>
<DebugType>Full</DebugType>
<Optimize>False</Optimize>
</PropertyGroup>
<ItemGroup>
<Compile Include="decimal.cs" />
</ItemGroup>
</Project>
| <Project Sdk="Microsoft.NET.Sdk">
<PropertyGroup>
<OutputType>Exe</OutputType>
<CLRTestPriority>1</CLRTestPriority>
</PropertyGroup>
<PropertyGroup>
<DebugType>Full</DebugType>
<Optimize>False</Optimize>
</PropertyGroup>
<ItemGroup>
<Compile Include="decimal.cs" />
</ItemGroup>
</Project>
| -1 |